-rw-r--r-- Documentation/core-api/printk-formats.rst | 18
-rw-r--r-- Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/display/fsl,lcdif.yaml | 110
-rw-r--r-- Documentation/devicetree/bindings/display/mxsfb.txt | 87
-rw-r--r-- Documentation/gpu/drm-kms-helpers.rst | 12
-rw-r--r-- Documentation/gpu/todo.rst | 74
-rw-r--r-- MAINTAINERS | 4
-rw-r--r-- drivers/dma-buf/dma-heap.c | 12
-rw-r--r-- drivers/dma-buf/heaps/cma_heap.c | 1
-rw-r--r-- drivers/dma-buf/heaps/system_heap.c | 1
-rw-r--r-- drivers/gpu/drm/Kconfig | 2
-rw-r--r-- drivers/gpu/drm/Makefile | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 16
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 28
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 28
-rw-r--r-- drivers/gpu/drm/arc/Kconfig | 10
-rw-r--r-- drivers/gpu/drm/arc/Makefile | 3
-rw-r--r-- drivers/gpu/drm/arc/arcpgu.h | 37
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_crtc.c | 217
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_drv.c | 224
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_hdmi.c | 48
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_regs.h | 31
-rw-r--r-- drivers/gpu/drm/arc/arcpgu_sim.c | 108
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h | 11
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c | 4
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 3
-rw-r--r-- drivers/gpu/drm/arm/display/komeda/komeda_plane.c | 21
-rw-r--r-- drivers/gpu/drm/arm/hdlcd_crtc.c | 30
-rw-r--r-- drivers/gpu/drm/arm/malidp_drv.c | 3
-rw-r--r-- drivers/gpu/drm/arm/malidp_mw.c | 7
-rw-r--r-- drivers/gpu/drm/arm/malidp_planes.c | 79
-rw-r--r-- drivers/gpu/drm/armada/armada_overlay.c | 113
-rw-r--r-- drivers/gpu/drm/armada/armada_plane.c | 115
-rw-r--r-- drivers/gpu/drm/armada/armada_plane.h | 2
-rw-r--r-- drivers/gpu/drm/aspeed/aspeed_gfx.h | 8
-rw-r--r-- drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c | 15
-rw-r--r-- drivers/gpu/drm/aspeed/aspeed_gfx_drv.c | 69
-rw-r--r-- drivers/gpu/drm/ast/Makefile | 3
-rw-r--r-- drivers/gpu/drm/ast/ast_cursor.c | 286
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.c | 2
-rw-r--r-- drivers/gpu/drm/ast/ast_drv.h | 47
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 388
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 107
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h | 7
-rw-r--r-- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 135
-rw-r--r-- drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 2
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 47
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 75
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 7
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology.c | 35
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 2
-rw-r--r-- drivers/gpu/drm/drm_framebuffer.c | 11
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 2
-rw-r--r-- drivers/gpu/drm/drm_gem_atomic_helper.c | 432
-rw-r--r-- drivers/gpu/drm/drm_gem_framebuffer_helper.c | 63
-rw-r--r-- drivers/gpu/drm/drm_gem_vram_helper.c | 42
-rw-r--r-- drivers/gpu/drm/drm_ioc32.c | 15
-rw-r--r-- drivers/gpu/drm/drm_mipi_dbi.c | 5
-rw-r--r-- drivers/gpu/drm/drm_panel_orientation_quirks.c | 14
-rw-r--r-- drivers/gpu/drm/drm_plane.c | 66
-rw-r--r-- drivers/gpu/drm/drm_simple_kms_helper.c | 50
-rw-r--r-- drivers/gpu/drm/drm_syncobj.c | 12
-rw-r--r-- drivers/gpu/drm/drm_vblank.c | 25
-rw-r--r-- drivers/gpu/drm/etnaviv/etnaviv_sched.c | 9
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_plane.c | 20
-rw-r--r-- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | 24
-rw-r--r-- drivers/gpu/drm/gma500/Kconfig | 11
-rw-r--r-- drivers/gpu/drm/gma500/Makefile | 17
-rw-r--r-- drivers/gpu/drm/gma500/accel_2d.c | 60
-rw-r--r-- drivers/gpu/drm/gma500/blitter.c | 43
-rw-r--r-- drivers/gpu/drm/gma500/blitter.h | 16
-rw-r--r-- drivers/gpu/drm/gma500/cdv_device.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/cdv_device.h | 1
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_crt.c | 15
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_display.c | 23
-rw-r--r-- drivers/gpu/drm/gma500/gtt.c | 11
-rw-r--r-- drivers/gpu/drm/gma500/intel_gmbus.c | 4
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_device.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/psb_device.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/psb_drv.c | 36
-rw-r--r-- drivers/gpu/drm/gma500/psb_drv.h | 6
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_display.c | 2
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_reg.h | 32
-rw-r--r-- drivers/gpu/drm/gma500/psb_irq.c | 110
-rw-r--r-- drivers/gpu/drm/gma500/psb_irq.h | 4
-rw-r--r-- drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c | 39
-rw-r--r-- drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 47
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display.c | 14
-rw-r--r-- drivers/gpu/drm/i915/display/intel_display_debugfs.c | 30
-rw-r--r-- drivers/gpu/drm/i915/display/intel_sprite.c | 6
-rw-r--r-- drivers/gpu/drm/imx/dcss/dcss-plane.c | 64
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-plane.c | 92
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 55
-rw-r--r-- drivers/gpu/drm/ingenic/ingenic-ipu.c | 77
-rw-r--r-- drivers/gpu/drm/kmb/kmb_plane.c | 50
-rw-r--r-- drivers/gpu/drm/lima/lima_devfreq.c | 11
-rw-r--r-- drivers/gpu/drm/lima/lima_devfreq.h | 2
-rw-r--r-- drivers/gpu/drm/lima/lima_sched.c | 6
-rw-r--r-- drivers/gpu/drm/mcde/mcde_display.c | 10
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 4
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_crtc.h | 2
-rw-r--r-- drivers/gpu/drm/mediatek/mtk_drm_plane.c | 103
-rw-r--r-- drivers/gpu/drm/meson/meson_overlay.c | 31
-rw-r--r-- drivers/gpu/drm/meson/meson_plane.c | 51
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_mode.c | 25
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 8
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 54
-rw-r--r-- drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 2
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c | 18
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c | 57
-rw-r--r-- drivers/gpu/drm/msm/msm_atomic.c | 4
-rw-r--r-- drivers/gpu/drm/mxsfb/mxsfb_kms.c | 23
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/wndw.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 27
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_ttm.h | 8
-rw-r--r-- drivers/gpu/drm/omapdrm/dss/dsi.c | 7
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_drv.c | 9
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_plane.c | 56
-rw-r--r-- drivers/gpu/drm/panel/panel-lvds.c | 2
-rw-r--r-- drivers/gpu/drm/panel/panel-seiko-43wvf1g.c | 4
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 2
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_devfreq.c | 10
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_devfreq.h | 2
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_job.c | 11
-rw-r--r-- drivers/gpu/drm/panfrost/panfrost_mmu.c | 39
-rw-r--r-- drivers/gpu/drm/pl111/pl111_display.c | 4
-rw-r--r-- drivers/gpu/drm/qxl/qxl_cmd.c | 3
-rw-r--r-- drivers/gpu/drm/qxl/qxl_display.c | 368
-rw-r--r-- drivers/gpu/drm/qxl/qxl_draw.c | 8
-rw-r--r-- drivers/gpu/drm/qxl/qxl_drv.h | 6
-rw-r--r-- drivers/gpu/drm/qxl/qxl_dumb.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_gem.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_image.c | 2
-rw-r--r-- drivers/gpu/drm/qxl/qxl_irq.c | 1
-rw-r--r-- drivers/gpu/drm/qxl/qxl_kms.c | 30
-rw-r--r-- drivers/gpu/drm/qxl/qxl_object.c | 57
-rw-r--r-- drivers/gpu/drm/qxl/qxl_object.h | 7
-rw-r--r-- drivers/gpu/drm/qxl/qxl_prime.c | 4
-rw-r--r-- drivers/gpu/drm/qxl/qxl_release.c | 76
-rw-r--r-- drivers/gpu/drm/qxl/qxl_ttm.c | 19
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 40
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_plane.c | 17
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_vsp.c | 20
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 81
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_lvds.c | 2
-rw-r--r-- drivers/gpu/drm/scheduler/sched_entity.c | 2
-rw-r--r-- drivers/gpu/drm/scheduler/sched_main.c | 22
-rw-r--r-- drivers/gpu/drm/sti/sti_cursor.c | 41
-rw-r--r-- drivers/gpu/drm/sti/sti_gdp.c | 77
-rw-r--r-- drivers/gpu/drm/sti/sti_hqvdp.c | 75
-rw-r--r-- drivers/gpu/drm/stm/ltdc.c | 84
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_backend.c | 6
-rw-r--r-- drivers/gpu/drm/sun4i/sun4i_layer.c | 15
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_ui_layer.c | 59
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_ui_layer.h | 5
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_vi_layer.c | 78
-rw-r--r-- drivers/gpu/drm/sun4i/sun8i_vi_layer.h | 11
-rw-r--r-- drivers/gpu/drm/tegra/dc.c | 116
-rw-r--r-- drivers/gpu/drm/tegra/drm.c | 3
-rw-r--r-- drivers/gpu/drm/tegra/hub.c | 62
-rw-r--r-- drivers/gpu/drm/tegra/plane.c | 4
-rw-r--r-- drivers/gpu/drm/tidss/tidss_kms.c | 4
-rw-r--r-- drivers/gpu/drm/tidss/tidss_plane.c | 53
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 20
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_plane.c | 46
-rw-r--r-- drivers/gpu/drm/tiny/Kconfig | 10
-rw-r--r-- drivers/gpu/drm/tiny/Makefile | 1
-rw-r--r-- drivers/gpu/drm/tiny/arcpgu.c | 434
-rw-r--r-- drivers/gpu/drm/tiny/cirrus.c | 43
-rw-r--r-- drivers/gpu/drm/tiny/gm12u320.c | 28
-rw-r--r-- drivers/gpu/drm/tiny/hx8357d.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/ili9225.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/ili9341.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/ili9486.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/mi0283qt.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/repaper.c | 3
-rw-r--r-- drivers/gpu/drm/tiny/st7586.c | 4
-rw-r--r-- drivers/gpu/drm/tiny/st7735r.c | 4
-rw-r--r-- drivers/gpu/drm/ttm/Makefile | 7
-rw-r--r-- drivers/gpu/drm/ttm/ttm_agp_backend.c | 2
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 335
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_util.c | 25
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_vm.c | 24
-rw-r--r-- drivers/gpu/drm/ttm/ttm_device.c | 205
-rw-r--r-- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 8
-rw-r--r-- drivers/gpu/drm/ttm/ttm_module.c | 54
-rw-r--r-- drivers/gpu/drm/ttm/ttm_module.h | 8
-rw-r--r-- drivers/gpu/drm/ttm/ttm_pool.c | 157
-rw-r--r-- drivers/gpu/drm/ttm/ttm_range_manager.c | 4
-rw-r--r-- drivers/gpu/drm/ttm/ttm_resource.c | 4
-rw-r--r-- drivers/gpu/drm/ttm/ttm_tt.c | 129
-rw-r--r-- drivers/gpu/drm/tve200/tve200_display.c | 4
-rw-r--r-- drivers/gpu/drm/udl/udl_modeset.c | 34
-rw-r--r-- drivers/gpu/drm/v3d/v3d_sched.c | 42
-rw-r--r-- drivers/gpu/drm/vboxvideo/vbox_mode.c | 82
-rw-r--r-- drivers/gpu/drm/vc4/vc4_kms.c | 17
-rw-r--r-- drivers/gpu/drm/vc4/vc4_plane.c | 76
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_plane.c | 19
-rw-r--r-- drivers/gpu/drm/vkms/vkms_crtc.c | 8
-rw-r--r-- drivers/gpu/drm/vkms/vkms_plane.c | 30
-rw-r--r-- drivers/gpu/drm/vkms/vkms_writeback.c | 7
-rw-r--r-- drivers/gpu/drm/vmwgfx/Makefile | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/ttm_memory.c (renamed from drivers/gpu/drm/ttm/ttm_memory.c) | 22
-rw-r--r-- drivers/gpu/drm/vmwgfx/ttm_memory.h (renamed from include/drm/ttm/ttm_memory.h) | 5
-rw-r--r-- drivers/gpu/drm/vmwgfx/ttm_object.c | 25
-rw-r--r-- drivers/gpu/drm/vmwgfx/ttm_object.h | 3
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_binding.c | 9
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 5
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 28
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 14
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 8
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 3
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 45
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 20
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 18
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 103
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 14
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 16
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 12
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 20
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 10
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_so.c | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 21
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 17
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 44
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 5
-rw-r--r-- drivers/gpu/drm/xen/Kconfig | 10
-rw-r--r-- drivers/gpu/drm/xen/xen_drm_front_kms.c | 3
-rw-r--r-- drivers/gpu/drm/xlnx/zynqmp_disp.c | 32
-rw-r--r-- drivers/gpu/drm/zte/zx_plane.c | 49
-rw-r--r-- drivers/media/v4l2-core/v4l2-ioctl.c | 85
-rw-r--r-- drivers/video/fbdev/amba-clcd.c | 17
-rw-r--r-- drivers/video/fbdev/efifb.c | 3
-rw-r--r-- drivers/video/fbdev/omap/hwa742.c | 42
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/dss/dsi.c | 2
-rw-r--r-- drivers/video/fbdev/simplefb.c | 5
-rw-r--r-- include/drm/drm_atomic.h | 4
-rw-r--r-- include/drm/drm_gem_atomic_helper.h | 113
-rw-r--r-- include/drm/drm_gem_framebuffer_helper.h | 7
-rw-r--r-- include/drm/drm_gem_vram_helper.h | 6
-rw-r--r-- include/drm/drm_modeset_helper_vtables.h | 31
-rw-r--r-- include/drm/drm_plane.h | 25
-rw-r--r-- include/drm/drm_simple_kms_helper.h | 29
-rw-r--r-- include/drm/drm_vblank.h | 1
-rw-r--r-- include/drm/gpu_scheduler.h | 23
-rw-r--r-- include/drm/ttm/ttm_bo_api.h | 48
-rw-r--r-- include/drm/ttm/ttm_bo_driver.h | 329
-rw-r--r-- include/drm/ttm/ttm_device.h | 318
-rw-r--r-- include/drm/ttm/ttm_resource.h | 4
-rw-r--r-- include/drm/ttm/ttm_tt.h | 15
-rw-r--r-- include/linux/dma-heap.h | 9
-rw-r--r-- include/linux/hdmi.h | 2
-rw-r--r-- include/linux/lockdep.h | 5
-rw-r--r-- include/linux/platform_data/simplefb.h | 1
-rw-r--r-- include/uapi/drm/drm_mode.h | 2
-rwxr-xr-x scripts/checkpatch.pl | 6
286 files changed, 5431 insertions, 4790 deletions
diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
index 160e710d992f..8267675ea95c 100644
--- a/Documentation/core-api/printk-formats.rst
+++ b/Documentation/core-api/printk-formats.rst
@@ -567,6 +567,24 @@ For printing netdev_features_t.
Passed by reference.
+V4L2 and DRM FourCC code (pixel format)
+---------------------------------------
+
+::
+
+ %p4cc
+
+Print a FourCC code used by V4L2 or DRM, including format endianness and
+its numerical value as hexadecimal.
+
+Passed by reference.
+
+Examples::
+
+ %p4cc BG12 little-endian (0x32314742)
+ %p4cc Y10 little-endian (0x20303159)
+ %p4cc NV12 big-endian (0xb231564e)
+
Thanks
======
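
[Editor's usage sketch, not part of the patch: a DRM driver could report an unsupported framebuffer format with the new specifier roughly as below; example_check_format is a made-up name, and the pattern of passing the fourcc by reference matches the DCE hunks later in this series.]

    #include <drm/drm_fourcc.h>
    #include <drm/drm_framebuffer.h>
    #include <drm/drm_print.h>

    static int example_check_format(const struct drm_framebuffer *fb)
    {
            switch (fb->format->format) {
            case DRM_FORMAT_XRGB8888:
                    return 0;
            default:
                    /* Prints e.g. "Unsupported screen format AR30 little-endian (0x30335241)" */
                    DRM_ERROR("Unsupported screen format %p4cc\n",
                              &fb->format->format);
                    return -EINVAL;
            }
    }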
diff --git a/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml b/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
index a1d5a32660e0..57324a5f0271 100644
--- a/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/brcm,bcm2711-hdmi.yaml
@@ -109,7 +109,7 @@ required:
- resets
- ddc
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/display/fsl,lcdif.yaml b/Documentation/devicetree/bindings/display/fsl,lcdif.yaml
new file mode 100644
index 000000000000..a4c3064c778c
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/fsl,lcdif.yaml
@@ -0,0 +1,110 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/fsl,lcdif.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale/NXP i.MX LCD Interface (LCDIF)
+
+maintainers:
+ - Marek Vasut <marex@denx.de>
+ - Stefan Agner <stefan@agner.ch>
+
+description: |
+ (e)LCDIF display controller found in the Freescale/NXP i.MX SoCs.
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - fsl,imx23-lcdif
+ - fsl,imx28-lcdif
+ - fsl,imx6sx-lcdif
+ - items:
+ - enum:
+ - fsl,imx6sl-lcdif
+ - fsl,imx6sll-lcdif
+ - fsl,imx6ul-lcdif
+ - fsl,imx7d-lcdif
+ - fsl,imx8mm-lcdif
+ - fsl,imx8mq-lcdif
+ - const: fsl,imx6sx-lcdif
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ items:
+ - description: Pixel clock
+ - description: Bus clock
+ - description: Display AXI clock
+ minItems: 1
+
+ clock-names:
+ items:
+ - const: pix
+ - const: axi
+ - const: disp_axi
+ minItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: The LCDIF output port
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+ - port
+
+additionalProperties: false
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,imx6sx-lcdif
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 3
+ clock-names:
+ minItems: 2
+ maxItems: 3
+ required:
+ - clock-names
+ else:
+ properties:
+ clocks:
+ maxItems: 1
+ clock-names:
+ maxItems: 1
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx6sx-clock.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ display-controller@2220000 {
+ compatible = "fsl,imx6sx-lcdif";
+ reg = <0x02220000 0x4000>;
+ interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SX_CLK_LCDIF1_PIX>,
+ <&clks IMX6SX_CLK_LCDIF_APB>,
+ <&clks IMX6SX_CLK_DISPLAY_AXI>;
+ clock-names = "pix", "axi", "disp_axi";
+
+ port {
+ endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/mxsfb.txt b/Documentation/devicetree/bindings/display/mxsfb.txt
deleted file mode 100644
index c985871c46b3..000000000000
--- a/Documentation/devicetree/bindings/display/mxsfb.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-* Freescale MXS LCD Interface (LCDIF)
-
-New bindings:
-=============
-Required properties:
-- compatible: Should be "fsl,imx23-lcdif" for i.MX23.
- Should be "fsl,imx28-lcdif" for i.MX28.
- Should be "fsl,imx6sx-lcdif" for i.MX6SX.
- Should be "fsl,imx8mq-lcdif" for i.MX8MQ.
-- reg: Address and length of the register set for LCDIF
-- interrupts: Should contain LCDIF interrupt
-- clocks: A list of phandle + clock-specifier pairs, one for each
- entry in 'clock-names'.
-- clock-names: A list of clock names. For MXSFB it should contain:
- - "pix" for the LCDIF block clock
- - (MX6SX-only) "axi", "disp_axi" for the bus interface clock
-
-Required sub-nodes:
- - port: The connection to an encoder chip.
-
-Example:
-
- lcdif1: display-controller@2220000 {
- compatible = "fsl,imx6sx-lcdif", "fsl,imx28-lcdif";
- reg = <0x02220000 0x4000>;
- interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&clks IMX6SX_CLK_LCDIF1_PIX>,
- <&clks IMX6SX_CLK_LCDIF_APB>,
- <&clks IMX6SX_CLK_DISPLAY_AXI>;
- clock-names = "pix", "axi", "disp_axi";
-
- port {
- parallel_out: endpoint {
- remote-endpoint = <&panel_in_parallel>;
- };
- };
- };
-
-Deprecated bindings:
-====================
-Required properties:
-- compatible: Should be "fsl,imx23-lcdif" for i.MX23.
- Should be "fsl,imx28-lcdif" for i.MX28.
-- reg: Address and length of the register set for LCDIF
-- interrupts: Should contain LCDIF interrupts
-- display: phandle to display node (see below for details)
-
-* display node
-
-Required properties:
-- bits-per-pixel: <16> for RGB565, <32> for RGB888/666.
-- bus-width: number of data lines. Could be <8>, <16>, <18> or <24>.
-
-Required sub-node:
-- display-timings: Refer to binding doc display-timing.txt for details.
-
-Examples:
-
-lcdif@80030000 {
- compatible = "fsl,imx28-lcdif";
- reg = <0x80030000 2000>;
- interrupts = <38 86>;
-
- display: display {
- bits-per-pixel = <32>;
- bus-width = <24>;
-
- display-timings {
- native-mode = <&timing0>;
- timing0: timing0 {
- clock-frequency = <33500000>;
- hactive = <800>;
- vactive = <480>;
- hfront-porch = <164>;
- hback-porch = <89>;
- hsync-len = <10>;
- vback-porch = <23>;
- vfront-porch = <10>;
- vsync-len = <10>;
- hsync-active = <0>;
- vsync-active = <0>;
- de-active = <1>;
- pixelclk-active = <0>;
- };
- };
- };
-};
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index b89ddd06dabb..389892f36185 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -80,6 +80,18 @@ Atomic State Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
:export:
+GEM Atomic Helper Reference
+---------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_atomic_helper.c
+ :doc: overview
+
+.. kernel-doc:: include/drm/drm_gem_atomic_helper.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_gem_atomic_helper.c
+ :export:
+
Simple KMS Helper Reference
===========================
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 22ce801e3a8d..1b4b64b71c7e 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -459,52 +459,6 @@ Contact: Emil Velikov, respective driver maintainers
Level: Intermediate
-Plumb drm_atomic_state all over
--------------------------------
-
-Currently various atomic functions take just a single or a handful of
-object states (eg. plane state). While that single object state can
-suffice for some simple cases, we often have to dig out additional
-object states for dealing with various dependencies between the individual
-objects or the hardware they represent. The process of digging out the
-additional states is rather non-intuitive and error prone.
-
-To fix that most functions should rather take the overall
-drm_atomic_state as one of their parameters. The other parameters
-would generally be the object(s) we mainly want to interact with.
-
-For example, instead of
-
-.. code-block:: c
-
- int (*atomic_check)(struct drm_plane *plane, struct drm_plane_state *state);
-
-we would have something like
-
-.. code-block:: c
-
- int (*atomic_check)(struct drm_plane *plane, struct drm_atomic_state *state);
-
-The implementation can then trivially gain access to any required object
-state(s) via drm_atomic_get_plane_state(), drm_atomic_get_new_plane_state(),
-drm_atomic_get_old_plane_state(), and their equivalents for
-other object types.
-
-Additionally many drivers currently access the object->state pointer
-directly in their commit functions. That is not going to work if we
-eg. want to allow deeper commit pipelines as those pointers could
-then point to the states corresponding to a future commit instead of
-the current commit we're trying to process. Also non-blocking commits
-execute locklessly so there are serious concerns with dereferencing
-the object->state pointers without holding the locks that protect them.
-Use of drm_atomic_get_new_plane_state(), drm_atomic_get_old_plane_state(),
-etc. avoids these problems as well since they relate to a specific
-commit via the passed in drm_atomic_state.
-
-Contact: Ville Syrjälä, Daniel Vetter
-
-Level: Intermediate
-
Use struct dma_buf_map throughout codebase
------------------------------------------
@@ -596,20 +550,24 @@ Contact: Daniel Vetter
Level: Intermediate
-KMS cleanups
-------------
+Object lifetime fixes
+---------------------
+
+There's two related issues here
+
+- Cleanup up the various ->destroy callbacks, which often are all the same
+ simple code.
-Some of these date from the very introduction of KMS in 2008 ...
+- Lots of drivers erroneously allocate DRM modeset objects using devm_kzalloc,
+ which results in use-after free issues on driver unload. This can be serious
+ trouble even for drivers for hardware integrated on the SoC due to
+ EPROBE_DEFERRED backoff.
-- Make ->funcs and ->helper_private vtables optional. There's a bunch of empty
- function tables in drivers, but before we can remove them we need to make sure
- that all the users in helpers and drivers do correctly check for a NULL
- vtable.
+Both these problems can be solved by switching over to drmm_kzalloc(), and the
+various convenience wrappers provided, e.g. drmm_crtc_alloc_with_planes(),
+drmm_universal_plane_alloc(), ... and so on.
-- Cleanup up the various ->destroy callbacks. A lot of them just wrapt the
- drm_*_cleanup implementations and can be removed. Some tack a kfree() at the
- end, for which we could add drm_*_cleanup_kfree(). And then there's the (for
- historical reasons) misnamed drm_primary_helper_destroy() function.
+Contact: Daniel Vetter
Level: Intermediate
@@ -666,8 +624,6 @@ See the documentation of :ref:`VKMS <vkms>` for more details. This is an ideal
internship task, since it only requires a virtual machine and can be sized to
fit the available time.
-Contact: Daniel Vetter
-
Level: See details
Backlight Refactoring
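
[Editor's sketch of the drmm_* pattern the rewritten TODO entry recommends; my_plane, my_plane_funcs and my_formats are placeholder names, not taken from this patch. The plane's memory is tied to the drm_device lifetime, so neither devm_kzalloc() nor an explicit ->destroy/kfree() is needed.]

    #include <linux/kernel.h>
    #include <drm/drm_atomic_helper.h>
    #include <drm/drm_fourcc.h>
    #include <drm/drm_plane.h>

    struct my_plane {
            struct drm_plane base;
            /* driver-private state goes here */
    };

    static const u32 my_formats[] = { DRM_FORMAT_XRGB8888 };

    static const struct drm_plane_funcs my_plane_funcs = {
            .update_plane           = drm_atomic_helper_update_plane,
            .disable_plane          = drm_atomic_helper_disable_plane,
            .reset                  = drm_atomic_helper_plane_reset,
            .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
            .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
            /* no .destroy: the drmm allocation releases the plane automatically */
    };

    static struct my_plane *my_plane_create(struct drm_device *drm)
    {
            return drmm_universal_plane_alloc(drm, struct my_plane, base,
                                              0xff, &my_plane_funcs,
                                              my_formats, ARRAY_SIZE(my_formats),
                                              NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
    }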
diff --git a/MAINTAINERS b/MAINTAINERS
index aa84121c5611..fbae9a19d017 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1323,7 +1323,7 @@ ARC PGU DRM DRIVER
M: Alexey Brodkin <abrodkin@synopsys.com>
S: Supported
F: Documentation/devicetree/bindings/display/snps,arcpgu.txt
-F: drivers/gpu/drm/arc/
+F: drivers/gpu/drm/tiny/arcpgu.c
ARCNET NETWORK LAYER
M: Michael Grzeschik <m.grzeschik@pengutronix.de>
@@ -12267,7 +12267,7 @@ M: Stefan Agner <stefan@agner.ch>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
-F: Documentation/devicetree/bindings/display/mxsfb.txt
+F: Documentation/devicetree/bindings/display/fsl,lcdif.yaml
F: drivers/gpu/drm/mxsfb/
MYLEX DAC960 PCI RAID Controller
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 6b5db954569f..56bf5ad01ad5 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -202,6 +202,18 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
return heap->priv;
}
+/**
+ * dma_heap_get_name() - get heap name
+ * @heap: DMA-Heap to retrieve private data for
+ *
+ * Returns:
+ * The char* for the heap name.
+ */
+const char *dma_heap_get_name(struct dma_heap *heap)
+{
+ return heap->name;
+}
+
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
{
struct dma_heap *heap, *h, *err_ret;
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 5d64eccd21d6..0c05b79870f9 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -339,6 +339,7 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
buffer->pagecount = pagecount;
/* create the dmabuf */
+ exp_info.exp_name = dma_heap_get_name(heap);
exp_info.ops = &cma_heap_buf_ops;
exp_info.size = buffer->len;
exp_info.flags = fd_flags;
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 29e49ac17251..23a7e74ef966 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -390,6 +390,7 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
}
/* create the dmabuf */
+ exp_info.exp_name = dma_heap_get_name(heap);
exp_info.ops = &system_heap_buf_ops;
exp_info.size = buffer->len;
exp_info.flags = fd_flags;
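
[Editor's sketch showing where the new helper fits in a full export path; my_heap_allocate and my_heap_buf_ops are placeholders and only the exp_name assignment is the point, mirroring the two hunks above.]

    #include <linux/dma-buf.h>
    #include <linux/dma-heap.h>

    static const struct dma_buf_ops my_heap_buf_ops = { /* real ops omitted in this sketch */ };

    static struct dma_buf *my_heap_allocate(struct dma_heap *heap, unsigned long len,
                                            unsigned long fd_flags,
                                            unsigned long heap_flags)
    {
            DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

            /* ... allocate and track the backing pages here ... */

            /* Label the dma-buf with the heap name, e.g. "system" or "cma". */
            exp_info.exp_name = dma_heap_get_name(heap);
            exp_info.ops = &my_heap_buf_ops;
            exp_info.size = len;
            exp_info.flags = fd_flags;

            return dma_buf_export(&exp_info);
    }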
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 85b79a7fee63..1461652921be 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -352,8 +352,6 @@ source "drivers/gpu/drm/vc4/Kconfig"
source "drivers/gpu/drm/etnaviv/Kconfig"
-source "drivers/gpu/drm/arc/Kconfig"
-
source "drivers/gpu/drm/hisilicon/Kconfig"
source "drivers/gpu/drm/mediatek/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 926adef289db..5eb5bf7c16e3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -44,7 +44,8 @@ drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
- drm_scdc_helper.o drm_gem_framebuffer_helper.o \
+ drm_scdc_helper.o drm_gem_atomic_helper.o \
+ drm_gem_framebuffer_helper.o \
drm_atomic_state_helper.o drm_damage_helper.o \
drm_format_helper.o drm_self_refresh_helper.o
@@ -110,7 +111,6 @@ obj-y += panel/
obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
-obj-$(CONFIG_DRM_ARCPGU)+= arc/
obj-y += hisilicon/
obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 49267eb64302..1af2fa1591fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1066,7 +1066,7 @@ static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
return &adev->ddev;
}
-static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
{
return container_of(bdev, struct amdgpu_device, mman.bdev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
index 3107b9575929..5af464933976 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -40,13 +40,13 @@ static atomic_t fence_seq = ATOMIC_INIT(0);
* All the BOs in a process share an eviction fence. When process X wants
* to map VRAM memory but TTM can't find enough space, TTM will attempt to
* evict BOs from its LRU list. TTM checks if the BO is valuable to evict
- * by calling ttm_bo_driver->eviction_valuable().
+ * by calling ttm_device_funcs->eviction_valuable().
*
- * ttm_bo_driver->eviction_valuable() - will return false if the BO belongs
+ * ttm_device_funcs->eviction_valuable() - will return false if the BO belongs
* to process X. Otherwise, it will return true to indicate BO can be
* evicted by TTM.
*
- * If ttm_bo_driver->eviction_valuable returns true, then TTM will continue
+ * If ttm_device_funcs->eviction_valuable returns true, then TTM will continue
* the evcition process for that BO by calling ttm_bo_evict --> amdgpu_bo_move
* --> amdgpu_copy_buffer(). This sets up job in GPU scheduler.
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index ac0a432a9bf7..9f6b299cbf74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -119,6 +119,16 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
*/
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
+static size_t amdgpu_amdkfd_acc_size(uint64_t size)
+{
+ size >>= PAGE_SHIFT;
+ size *= sizeof(dma_addr_t) + sizeof(void *);
+
+ return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
+ __roundup_pow_of_two(sizeof(struct ttm_tt)) +
+ PAGE_ALIGN(size);
+}
+
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 domain, bool sg)
{
@@ -127,8 +137,7 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
int ret = 0;
- acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
- sizeof(struct amdgpu_bo));
+ acc_size = amdgpu_amdkfd_acc_size(size);
vram_needed = 0;
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
@@ -175,8 +184,7 @@ static void unreserve_mem_limit(struct amdgpu_device *adev,
{
size_t acc_size;
- acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
- sizeof(struct amdgpu_bo));
+ acc_size = amdgpu_amdkfd_acc_size(size);
spin_lock(&kfd_mem_limit.mem_limit_lock);
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d56f4023ebb3..8e0a5650d383 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -487,7 +487,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
num_hw_submission, amdgpu_job_hang_limit,
- timeout, ring->name);
+ timeout, NULL, ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 0db933026722..fde2d899b2c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -71,7 +71,7 @@
*/
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
- struct page *dummy_page = ttm_bo_glob.dummy_read_page;
+ struct page *dummy_page = ttm_glob.dummy_read_page;
if (adev->dummy_page_addr)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index ff48101bab55..759b34799221 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,7 +28,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
-static void amdgpu_job_timedout(struct drm_sched_job *s_job)
+static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
@@ -41,7 +41,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
DRM_ERROR("ring %s timeout, but soft recovered\n",
s_job->sched->name);
- return;
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
@@ -53,10 +53,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
if (amdgpu_device_should_recover_gpu(ring->adev)) {
amdgpu_device_gpu_recover(ring->adev, job);
+ return DRM_GPU_SCHED_STAT_NOMINAL;
} else {
drm_sched_suspend_timeout(&ring->sched);
if (amdgpu_sriov_vf(adev))
adev->virt.tdr_debug = true;
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
}
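
[Editor's note: the hunk above tracks an API change elsewhere in this series: drm_sched_backend_ops.timedout_job now reports an enum drm_gpu_sched_stat instead of returning void. A minimal, hypothetical handler (my_job_timedout is a placeholder) looks roughly like the sketch below.]

    #include <drm/gpu_scheduler.h>

    static enum drm_gpu_sched_stat my_job_timedout(struct drm_sched_job *sched_job)
    {
            /* ... driver-specific reset / recovery would go here ... */

            /* Device and scheduler are usable again. */
            return DRM_GPU_SCHED_STAT_NOMINAL;
    }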
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 4b29b8205442..984dcf5a475e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -523,7 +523,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
};
struct amdgpu_bo *bo;
unsigned long page_align, size = bp->size;
- size_t acc_size;
int r;
/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
@@ -546,9 +545,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
*bo_ptr = NULL;
- acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
- sizeof(struct amdgpu_bo));
-
bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
@@ -577,8 +573,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo->tbo.priority = 1;
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
- &bo->placement, page_align, &ctx, acc_size,
- NULL, bp->resv, &amdgpu_bo_destroy);
+ &bo->placement, page_align, &ctx, NULL,
+ bp->resv, &amdgpu_bo_destroy);
if (unlikely(r != 0))
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 9fd2157b133a..a785acc09f20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -61,10 +61,10 @@
#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
-static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem);
-static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm);
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
@@ -646,7 +646,7 @@ out:
*
* Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
*/
-static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
+static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct drm_mm_node *mm_node = mem->mm_node;
@@ -893,7 +893,7 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
*
* Called by amdgpu_ttm_backend_bind()
**/
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@ -931,7 +931,7 @@ release_sg:
/*
* amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
*/
-static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@ -1015,7 +1015,7 @@ gart_bind_fail:
* Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
* This handles binding GTT memory to the device address space.
*/
-static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem)
{
@@ -1155,7 +1155,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
* Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
* ttm_tt_destroy().
*/
-static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@ -1180,7 +1180,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
gtt->bound = false;
}
-static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1234,7 +1234,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
* Map the pages of a ttm_tt object to an address space visible
* to the underlying device.
*/
-static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_operation_ctx *ctx)
{
@@ -1278,7 +1278,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
* Unmaps pages of a ttm_tt object from the device address space and
* unpopulates the page array backing it.
*/
-static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1603,7 +1603,7 @@ amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
amdgpu_bo_move_notify(bo, false, NULL);
}
-static struct ttm_bo_driver amdgpu_bo_driver = {
+static struct ttm_device_funcs amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
@@ -1785,7 +1785,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
/* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
+ r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
adev_to_drm(adev)->vma_offset_manager,
adev->need_swiotlb,
@@ -1926,7 +1926,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
- ttm_bo_device_release(&adev->mman.bdev);
+ ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
}
@@ -2002,7 +2002,7 @@ unlock:
return ret;
}
-static struct vm_operations_struct amdgpu_ttm_vm_ops = {
+static const struct vm_operations_struct amdgpu_ttm_vm_ops = {
.fault = amdgpu_ttm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index d2987536d7cd..7189f8370108 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -60,7 +60,7 @@ struct amdgpu_gtt_mgr {
};
struct amdgpu_mman {
- struct ttm_bo_device bdev;
+ struct ttm_device bdev;
bool initialized;
void __iomem *aper_base_kaddr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ad91c0c3c423..9d19078246c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -638,15 +638,15 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm_bo_base *bo_base;
if (vm->bulk_moveable) {
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
return;
}
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
@@ -660,7 +660,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
&bo->shadow->tbo.mem,
&vm->lru_bulk_move);
}
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
vm->bulk_moveable = true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 7944781e1086..ea825b4f8ee8 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1862,7 +1862,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1981,8 +1980,8 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
#endif
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 1b6ff0470011..a360a6dec198 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1904,7 +1904,6 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2023,8 +2022,8 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
#endif
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 83a88385b762..ef124ac853b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1820,7 +1820,6 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1929,8 +1928,8 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
#endif
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 224b30214427..c98650183828 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1791,7 +1791,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1902,8 +1901,8 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
#endif
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 573cf17262da..55e39b462a5e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -4583,7 +4583,6 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const struct drm_framebuffer *fb = plane_state->fb;
const struct amdgpu_framebuffer *afb =
to_amdgpu_framebuffer(plane_state->fb);
- struct drm_format_name_buf format_name;
int ret;
memset(plane_info, 0, sizeof(*plane_info));
@@ -4631,8 +4630,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
break;
default:
DRM_ERROR(
- "Unsupported screen format %s\n",
- drm_get_format_name(fb->format->format, &format_name));
+ "Unsupported screen format %p4cc\n",
+ &fb->format->format);
return -EINVAL;
}
@@ -6515,8 +6514,10 @@ static int dm_plane_helper_check_state(struct drm_plane_state *state,
}
static int dm_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct amdgpu_device *adev = drm_to_adev(plane->dev);
struct dc *dc = adev->dm.dc;
struct dm_plane_state *dm_plane_state;
@@ -6524,23 +6525,24 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc_state *new_crtc_state;
int ret;
- trace_amdgpu_dm_plane_atomic_check(state);
+ trace_amdgpu_dm_plane_atomic_check(new_plane_state);
- dm_plane_state = to_dm_plane_state(state);
+ dm_plane_state = to_dm_plane_state(new_plane_state);
if (!dm_plane_state->dc_state)
return 0;
new_crtc_state =
- drm_atomic_get_new_crtc_state(state->state, state->crtc);
+ drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
if (!new_crtc_state)
return -EINVAL;
- ret = dm_plane_helper_check_state(state, new_crtc_state);
+ ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
if (ret)
return ret;
- ret = fill_dc_scaling_info(state, &scaling_info);
+ ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
if (ret)
return ret;
@@ -6551,7 +6553,7 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
}
static int dm_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_plane_state *new_plane_state)
+ struct drm_atomic_state *state)
{
/* Only support async updates on cursor planes. */
if (plane->type != DRM_PLANE_TYPE_CURSOR)
@@ -6561,10 +6563,12 @@ static int dm_plane_atomic_async_check(struct drm_plane *plane,
}
static void dm_plane_atomic_async_update(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_plane_state *old_state =
- drm_atomic_get_old_plane_state(new_state->state, plane);
+ drm_atomic_get_old_plane_state(state, plane);
trace_amdgpu_dm_atomic_update_cursor(new_state);
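
[Editor's sketch of the conversion pattern applied to plane helpers throughout this series: the hooks receive the full drm_atomic_state and look up whichever object states they need. foo_plane_atomic_check is a placeholder name, not from the patch.]

    #include <drm/drm_atomic.h>
    #include <drm/drm_plane.h>

    static int foo_plane_atomic_check(struct drm_plane *plane,
                                      struct drm_atomic_state *state)
    {
            struct drm_plane_state *new_state =
                    drm_atomic_get_new_plane_state(state, plane);
            struct drm_crtc_state *crtc_state;

            if (!new_state->crtc)
                    return 0;

            crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
            if (!crtc_state)
                    return -EINVAL;

            /* driver-specific checks against new_state and crtc_state go here */
            return 0;
    }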
diff --git a/drivers/gpu/drm/arc/Kconfig b/drivers/gpu/drm/arc/Kconfig
deleted file mode 100644
index e8f3d63e0b91..000000000000
--- a/drivers/gpu/drm/arc/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config DRM_ARCPGU
- tristate "ARC PGU"
- depends on DRM && OF
- select DRM_KMS_CMA_HELPER
- select DRM_KMS_HELPER
- help
- Choose this option if you have an ARC PGU controller.
-
- If M is selected the module will be called arcpgu.
diff --git a/drivers/gpu/drm/arc/Makefile b/drivers/gpu/drm/arc/Makefile
deleted file mode 100644
index c7028b7427b3..000000000000
--- a/drivers/gpu/drm/arc/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_sim.o arcpgu_drv.o
-obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
diff --git a/drivers/gpu/drm/arc/arcpgu.h b/drivers/gpu/drm/arc/arcpgu.h
deleted file mode 100644
index 6aac44b953ad..000000000000
--- a/drivers/gpu/drm/arc/arcpgu.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * ARC PGU DRM driver.
- *
- * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
- */
-
-#ifndef _ARCPGU_H_
-#define _ARCPGU_H_
-
-struct arcpgu_drm_private {
- void __iomem *regs;
- struct clk *clk;
- struct drm_framebuffer *fb;
- struct drm_crtc crtc;
- struct drm_plane *plane;
-};
-
-#define crtc_to_arcpgu_priv(x) container_of(x, struct arcpgu_drm_private, crtc)
-
-static inline void arc_pgu_write(struct arcpgu_drm_private *arcpgu,
- unsigned int reg, u32 value)
-{
- iowrite32(value, arcpgu->regs + reg);
-}
-
-static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu,
- unsigned int reg)
-{
- return ioread32(arcpgu->regs + reg);
-}
-
-int arc_pgu_setup_crtc(struct drm_device *dev);
-int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np);
-int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np);
-
-#endif
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
deleted file mode 100644
index 895cdd991af6..000000000000
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ARC PGU DRM driver.
- *
- * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
- */
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_device.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <linux/clk.h>
-#include <linux/platform_data/simplefb.h>
-
-#include "arcpgu.h"
-#include "arcpgu_regs.h"
-
-#define ENCODE_PGU_XY(x, y) ((((x) - 1) << 16) | ((y) - 1))
-
-static const u32 arc_pgu_supported_formats[] = {
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
-};
-
-static void arc_pgu_set_pxl_fmt(struct drm_crtc *crtc)
-{
- struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
- const struct drm_framebuffer *fb = crtc->primary->state->fb;
- uint32_t pixel_format = fb->format->format;
- u32 format = DRM_FORMAT_INVALID;
- int i;
- u32 reg_ctrl;
-
- for (i = 0; i < ARRAY_SIZE(arc_pgu_supported_formats); i++) {
- if (arc_pgu_supported_formats[i] == pixel_format)
- format = arc_pgu_supported_formats[i];
- }
-
- if (WARN_ON(format == DRM_FORMAT_INVALID))
- return;
-
- reg_ctrl = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL);
- if (format == DRM_FORMAT_RGB565)
- reg_ctrl &= ~ARCPGU_MODE_XRGB8888;
- else
- reg_ctrl |= ARCPGU_MODE_XRGB8888;
- arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, reg_ctrl);
-}
-
-static const struct drm_crtc_funcs arc_pgu_crtc_funcs = {
- .destroy = drm_crtc_cleanup,
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
-};
-
-static enum drm_mode_status arc_pgu_crtc_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
-{
- struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
- long rate, clk_rate = mode->clock * 1000;
- long diff = clk_rate / 200; /* +-0.5% allowed by HDMI spec */
-
- rate = clk_round_rate(arcpgu->clk, clk_rate);
- if ((max(rate, clk_rate) - min(rate, clk_rate) < diff) && (rate > 0))
- return MODE_OK;
-
- return MODE_NOCLOCK;
-}
-
-static void arc_pgu_crtc_mode_set_nofb(struct drm_crtc *crtc)
-{
- struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
- struct drm_display_mode *m = &crtc->state->adjusted_mode;
- u32 val;
-
- arc_pgu_write(arcpgu, ARCPGU_REG_FMT,
- ENCODE_PGU_XY(m->crtc_htotal, m->crtc_vtotal));
-
- arc_pgu_write(arcpgu, ARCPGU_REG_HSYNC,
- ENCODE_PGU_XY(m->crtc_hsync_start - m->crtc_hdisplay,
- m->crtc_hsync_end - m->crtc_hdisplay));
-
- arc_pgu_write(arcpgu, ARCPGU_REG_VSYNC,
- ENCODE_PGU_XY(m->crtc_vsync_start - m->crtc_vdisplay,
- m->crtc_vsync_end - m->crtc_vdisplay));
-
- arc_pgu_write(arcpgu, ARCPGU_REG_ACTIVE,
- ENCODE_PGU_XY(m->crtc_hblank_end - m->crtc_hblank_start,
- m->crtc_vblank_end - m->crtc_vblank_start));
-
- val = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL);
-
- if (m->flags & DRM_MODE_FLAG_PVSYNC)
- val |= ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST;
- else
- val &= ~(ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST);
-
- if (m->flags & DRM_MODE_FLAG_PHSYNC)
- val |= ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST;
- else
- val &= ~(ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST);
-
- arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, val);
- arc_pgu_write(arcpgu, ARCPGU_REG_STRIDE, 0);
- arc_pgu_write(arcpgu, ARCPGU_REG_START_SET, 1);
-
- arc_pgu_set_pxl_fmt(crtc);
-
- clk_set_rate(arcpgu->clk, m->crtc_clock * 1000);
-}
-
-static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
-
- clk_prepare_enable(arcpgu->clk);
- arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
- arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) |
- ARCPGU_CTRL_ENABLE_MASK);
-}
-
-static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
-
- clk_disable_unprepare(arcpgu->clk);
- arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
- arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) &
- ~ARCPGU_CTRL_ENABLE_MASK);
-}
-
-static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = {
- .mode_valid = arc_pgu_crtc_mode_valid,
- .mode_set_nofb = arc_pgu_crtc_mode_set_nofb,
- .atomic_enable = arc_pgu_crtc_atomic_enable,
- .atomic_disable = arc_pgu_crtc_atomic_disable,
-};
-
-static void arc_pgu_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- struct arcpgu_drm_private *arcpgu;
- struct drm_gem_cma_object *gem;
-
- if (!plane->state->crtc || !plane->state->fb)
- return;
-
- arcpgu = crtc_to_arcpgu_priv(plane->state->crtc);
- gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
- arc_pgu_write(arcpgu, ARCPGU_REG_BUF0_ADDR, gem->paddr);
-}
-
-static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
- .atomic_update = arc_pgu_plane_atomic_update,
-};
-
-static const struct drm_plane_funcs arc_pgu_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
-
-static struct drm_plane *arc_pgu_plane_init(struct drm_device *drm)
-{
- struct arcpgu_drm_private *arcpgu = drm->dev_private;
- struct drm_plane *plane = NULL;
- int ret;
-
- plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
- if (!plane)
- return ERR_PTR(-ENOMEM);
-
- ret = drm_universal_plane_init(drm, plane, 0xff, &arc_pgu_plane_funcs,
- arc_pgu_supported_formats,
- ARRAY_SIZE(arc_pgu_supported_formats),
- NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret)
- return ERR_PTR(ret);
-
- drm_plane_helper_add(plane, &arc_pgu_plane_helper_funcs);
- arcpgu->plane = plane;
-
- return plane;
-}
-
-int arc_pgu_setup_crtc(struct drm_device *drm)
-{
- struct arcpgu_drm_private *arcpgu = drm->dev_private;
- struct drm_plane *primary;
- int ret;
-
- primary = arc_pgu_plane_init(drm);
- if (IS_ERR(primary))
- return PTR_ERR(primary);
-
- ret = drm_crtc_init_with_planes(drm, &arcpgu->crtc, primary, NULL,
- &arc_pgu_crtc_funcs, NULL);
- if (ret) {
- drm_plane_cleanup(primary);
- return ret;
- }
-
- drm_crtc_helper_add(&arcpgu->crtc, &arc_pgu_crtc_helper_funcs);
- return 0;
-}
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
deleted file mode 100644
index 077d006b1fbf..000000000000
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ /dev/null
@@ -1,224 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ARC PGU DRM driver.
- *
- * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
- */
-
-#include <linux/clk.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_debugfs.h>
-#include <drm/drm_device.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
-#include <linux/dma-mapping.h>
-#include <linux/module.h>
-#include <linux/of_reserved_mem.h>
-#include <linux/platform_device.h>
-
-#include "arcpgu.h"
-#include "arcpgu_regs.h"
-
-static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
- .fb_create = drm_gem_fb_create,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
-};
-
-static void arcpgu_setup_mode_config(struct drm_device *drm)
-{
- drm_mode_config_init(drm);
- drm->mode_config.min_width = 0;
- drm->mode_config.min_height = 0;
- drm->mode_config.max_width = 1920;
- drm->mode_config.max_height = 1080;
- drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
-}
-
-DEFINE_DRM_GEM_CMA_FOPS(arcpgu_drm_ops);
-
-static int arcpgu_load(struct drm_device *drm)
-{
- struct platform_device *pdev = to_platform_device(drm->dev);
- struct arcpgu_drm_private *arcpgu;
- struct device_node *encoder_node = NULL, *endpoint_node = NULL;
- struct resource *res;
- int ret;
-
- arcpgu = devm_kzalloc(&pdev->dev, sizeof(*arcpgu), GFP_KERNEL);
- if (arcpgu == NULL)
- return -ENOMEM;
-
- drm->dev_private = arcpgu;
-
- arcpgu->clk = devm_clk_get(drm->dev, "pxlclk");
- if (IS_ERR(arcpgu->clk))
- return PTR_ERR(arcpgu->clk);
-
- arcpgu_setup_mode_config(drm);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(arcpgu->regs))
- return PTR_ERR(arcpgu->regs);
-
- dev_info(drm->dev, "arc_pgu ID: 0x%x\n",
- arc_pgu_read(arcpgu, ARCPGU_REG_ID));
-
- /* Get the optional framebuffer memory resource */
- ret = of_reserved_mem_device_init(drm->dev);
- if (ret && ret != -ENODEV)
- return ret;
-
- if (dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)))
- return -ENODEV;
-
- if (arc_pgu_setup_crtc(drm) < 0)
- return -ENODEV;
-
- /*
- * There is only one output port inside each device. It is linked with
- * encoder endpoint.
- */
- endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
- if (endpoint_node) {
- encoder_node = of_graph_get_remote_port_parent(endpoint_node);
- of_node_put(endpoint_node);
- }
-
- if (encoder_node) {
- ret = arcpgu_drm_hdmi_init(drm, encoder_node);
- of_node_put(encoder_node);
- if (ret < 0)
- return ret;
- } else {
- dev_info(drm->dev, "no encoder found. Assumed virtual LCD on simulation platform\n");
- ret = arcpgu_drm_sim_init(drm, NULL);
- if (ret < 0)
- return ret;
- }
-
- drm_mode_config_reset(drm);
- drm_kms_helper_poll_init(drm);
-
- platform_set_drvdata(pdev, drm);
- return 0;
-}
-
-static int arcpgu_unload(struct drm_device *drm)
-{
- drm_kms_helper_poll_fini(drm);
- drm_atomic_helper_shutdown(drm);
- drm_mode_config_cleanup(drm);
-
- return 0;
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int arcpgu_show_pxlclock(struct seq_file *m, void *arg)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *drm = node->minor->dev;
- struct arcpgu_drm_private *arcpgu = drm->dev_private;
- unsigned long clkrate = clk_get_rate(arcpgu->clk);
- unsigned long mode_clock = arcpgu->crtc.mode.crtc_clock * 1000;
-
- seq_printf(m, "hw : %lu\n", clkrate);
- seq_printf(m, "mode: %lu\n", mode_clock);
- return 0;
-}
-
-static struct drm_info_list arcpgu_debugfs_list[] = {
- { "clocks", arcpgu_show_pxlclock, 0 },
-};
-
-static void arcpgu_debugfs_init(struct drm_minor *minor)
-{
- drm_debugfs_create_files(arcpgu_debugfs_list,
- ARRAY_SIZE(arcpgu_debugfs_list),
- minor->debugfs_root, minor);
-}
-#endif
-
-static const struct drm_driver arcpgu_drm_driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .name = "arcpgu",
- .desc = "ARC PGU Controller",
- .date = "20160219",
- .major = 1,
- .minor = 0,
- .patchlevel = 0,
- .fops = &arcpgu_drm_ops,
- DRM_GEM_CMA_DRIVER_OPS,
-#ifdef CONFIG_DEBUG_FS
- .debugfs_init = arcpgu_debugfs_init,
-#endif
-};
-
-static int arcpgu_probe(struct platform_device *pdev)
-{
- struct drm_device *drm;
- int ret;
-
- drm = drm_dev_alloc(&arcpgu_drm_driver, &pdev->dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
-
- ret = arcpgu_load(drm);
- if (ret)
- goto err_unref;
-
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto err_unload;
-
- drm_fbdev_generic_setup(drm, 16);
-
- return 0;
-
-err_unload:
- arcpgu_unload(drm);
-
-err_unref:
- drm_dev_put(drm);
-
- return ret;
-}
-
-static int arcpgu_remove(struct platform_device *pdev)
-{
- struct drm_device *drm = platform_get_drvdata(pdev);
-
- drm_dev_unregister(drm);
- arcpgu_unload(drm);
- drm_dev_put(drm);
-
- return 0;
-}
-
-static const struct of_device_id arcpgu_of_table[] = {
- {.compatible = "snps,arcpgu"},
- {}
-};
-
-MODULE_DEVICE_TABLE(of, arcpgu_of_table);
-
-static struct platform_driver arcpgu_platform_driver = {
- .probe = arcpgu_probe,
- .remove = arcpgu_remove,
- .driver = {
- .name = "arcpgu",
- .of_match_table = arcpgu_of_table,
- },
-};
-
-module_platform_driver(arcpgu_platform_driver);
-
-MODULE_AUTHOR("Carlos Palminha <palminha@synopsys.com>");
-MODULE_DESCRIPTION("ARC PGU DRM driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
deleted file mode 100644
index 52839934f2fb..000000000000
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ARC PGU DRM driver.
- *
- * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
- */
-
-#include <drm/drm_bridge.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_encoder.h>
-#include <drm/drm_device.h>
-
-#include "arcpgu.h"
-
-static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
-{
- struct drm_encoder *encoder;
- struct drm_bridge *bridge;
-
- int ret = 0;
-
- encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
- if (encoder == NULL)
- return -ENOMEM;
-
- /* Locate drm bridge from the hdmi encoder DT node */
- bridge = of_drm_find_bridge(np);
- if (!bridge)
- return -EPROBE_DEFER;
-
- encoder->possible_crtcs = 1;
- encoder->possible_clones = 0;
- ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- if (ret)
- return ret;
-
- /* Link drm_bridge to encoder */
- ret = drm_bridge_attach(encoder, bridge, NULL, 0);
- if (ret)
- drm_encoder_cleanup(encoder);
-
- return ret;
-}
diff --git a/drivers/gpu/drm/arc/arcpgu_regs.h b/drivers/gpu/drm/arc/arcpgu_regs.h
deleted file mode 100644
index b689a382d556..000000000000
--- a/drivers/gpu/drm/arc/arcpgu_regs.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * ARC PGU DRM driver.
- *
- * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
- */
-
-#ifndef _ARC_PGU_REGS_H_
-#define _ARC_PGU_REGS_H_
-
-#define ARCPGU_REG_CTRL 0x00
-#define ARCPGU_REG_STAT 0x04
-#define ARCPGU_REG_FMT 0x10
-#define ARCPGU_REG_HSYNC 0x14
-#define ARCPGU_REG_VSYNC 0x18
-#define ARCPGU_REG_ACTIVE 0x1c
-#define ARCPGU_REG_BUF0_ADDR 0x40
-#define ARCPGU_REG_STRIDE 0x50
-#define ARCPGU_REG_START_SET 0x84
-
-#define ARCPGU_REG_ID 0x3FC
-
-#define ARCPGU_CTRL_ENABLE_MASK 0x02
-#define ARCPGU_CTRL_VS_POL_MASK 0x1
-#define ARCPGU_CTRL_VS_POL_OFST 0x3
-#define ARCPGU_CTRL_HS_POL_MASK 0x1
-#define ARCPGU_CTRL_HS_POL_OFST 0x4
-#define ARCPGU_MODE_XRGB8888 BIT(2)
-#define ARCPGU_STAT_BUSY_MASK 0x02
-
-#endif
diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c
deleted file mode 100644
index 37d961668dfe..000000000000
--- a/drivers/gpu/drm/arc/arcpgu_sim.c
+++ /dev/null
@@ -1,108 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * ARC PGU DRM driver.
- *
- * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
- */
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_device.h>
-#include <drm/drm_probe_helper.h>
-
-#include "arcpgu.h"
-
-#define XRES_DEF 640
-#define YRES_DEF 480
-
-#define XRES_MAX 8192
-#define YRES_MAX 8192
-
-
-struct arcpgu_drm_connector {
- struct drm_connector connector;
-};
-
-static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
-{
- int count;
-
- count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
- drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
- return count;
-}
-
-static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
-static const struct drm_connector_helper_funcs
-arcpgu_drm_connector_helper_funcs = {
- .get_modes = arcpgu_drm_connector_get_modes,
-};
-
-static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = arcpgu_drm_connector_destroy,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np)
-{
- struct arcpgu_drm_connector *arcpgu_connector;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int ret;
-
- encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
- if (encoder == NULL)
- return -ENOMEM;
-
- encoder->possible_crtcs = 1;
- encoder->possible_clones = 0;
-
- ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
- if (ret)
- return ret;
-
- arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector),
- GFP_KERNEL);
- if (!arcpgu_connector) {
- ret = -ENOMEM;
- goto error_encoder_cleanup;
- }
-
- connector = &arcpgu_connector->connector;
- drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
-
- ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL);
- if (ret < 0) {
- dev_err(drm->dev, "failed to initialize drm connector\n");
- goto error_encoder_cleanup;
- }
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret < 0) {
- dev_err(drm->dev, "could not attach connector to encoder\n");
- drm_connector_unregister(connector);
- goto error_connector_cleanup;
- }
-
- return 0;
-
-error_connector_cleanup:
- drm_connector_cleanup(connector);
-
-error_encoder_cleanup:
- drm_encoder_cleanup(encoder);
- return ret;
-}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
index 32273cf18f7c..cf7a183f773d 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_format_caps.h
@@ -82,17 +82,6 @@ struct komeda_format_caps_table {
extern u64 komeda_supported_modifiers[];
-static inline const char *komeda_get_format_name(u32 fourcc, u64 modifier)
-{
- struct drm_format_name_buf buf;
- static char name[64];
-
- snprintf(name, sizeof(name), "%s with modifier: 0x%llx.",
- drm_get_format_name(fourcc, &buf), modifier);
-
- return name;
-}
-
const struct komeda_format_caps *
komeda_get_format_caps(struct komeda_format_caps_table *table,
u32 fourcc, u64 modifier);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index 170f9dc8ec19..3c372d2deb0a 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -276,8 +276,8 @@ bool komeda_fb_is_layer_supported(struct komeda_fb *kfb, u32 layer_type,
supported = komeda_format_mod_supported(&mdev->fmt_tbl, layer_type,
fourcc, modifier, rot);
if (!supported)
- DRM_DEBUG_ATOMIC("Layer TYPE: %d doesn't support fb FMT: %s.\n",
- layer_type, komeda_get_format_name(fourcc, modifier));
+ DRM_DEBUG_ATOMIC("Layer TYPE: %d doesn't support fb FMT: %p4cc with modifier: 0x%llx.\n",
+ layer_type, &fourcc, modifier);
return supported;
}
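
The komeda hunks above (and the malidp_mw hunk further down) drop the open-coded format-name helper in favour of the %p4cc printk specifier, which prints a FOURCC code from a pointer to its u32 value. A minimal sketch of the pattern, with a hypothetical function name and message:

#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>

static void example_log_fb_format(const struct drm_framebuffer *fb)
{
	/* %p4cc dereferences its argument, so pass a pointer to the fourcc. */
	DRM_DEBUG_KMS("fb format %p4cc with modifier 0x%llx\n",
		      &fb->format->format, fb->modifier);
}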
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 034ee08482e0..aeda4e5ec4f4 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -73,6 +73,7 @@ static const struct drm_driver komeda_kms_driver = {
static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
+ bool fence_cookie = dma_fence_begin_signalling();
drm_atomic_helper_commit_modeset_disables(dev, old_state);
@@ -85,6 +86,8 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_wait_for_flip_done(dev, old_state);
+ dma_fence_end_signalling(fence_cookie);
+
drm_atomic_helper_cleanup_planes(dev, old_state);
}
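
The fence_cookie added to komeda_kms_commit_tail() (and to malidp_atomic_commit_tail() below) marks the commit path as a fence-signalling critical section, so lockdep can report anything in between that might deadlock against fence completion. A minimal sketch of the annotation around the default helper sequence, for a hypothetical commit_tail:

#include <linux/dma-fence.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

static void example_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	bool fence_cookie = dma_fence_begin_signalling();

	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);
	drm_atomic_helper_commit_hw_done(old_state);
	drm_atomic_helper_wait_for_flip_done(dev, old_state);

	/* End the critical section before cleanup, which is allowed to block. */
	dma_fence_end_signalling(fence_cookie);

	drm_atomic_helper_cleanup_planes(dev, old_state);
}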
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index 98e915e325dd..d63d83800a8a 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -49,10 +49,8 @@ komeda_plane_init_data_flow(struct drm_plane_state *st,
dflow->rot = drm_rotation_simplify(st->rotation, caps->supported_rots);
if (!has_bits(dflow->rot, caps->supported_rots)) {
- DRM_DEBUG_ATOMIC("rotation(0x%x) isn't supported by %s.\n",
- dflow->rot,
- komeda_get_format_name(caps->fourcc,
- fb->modifier));
+ DRM_DEBUG_ATOMIC("rotation(0x%x) isn't supported by %p4cc with modifier: 0x%llx.\n",
+ dflow->rot, &caps->fourcc, fb->modifier);
return -EINVAL;
}
@@ -71,20 +69,23 @@ komeda_plane_init_data_flow(struct drm_plane_state *st,
*/
static int
komeda_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct komeda_plane *kplane = to_kplane(plane);
- struct komeda_plane_state *kplane_st = to_kplane_st(state);
+ struct komeda_plane_state *kplane_st = to_kplane_st(new_plane_state);
struct komeda_layer *layer = kplane->layer;
struct drm_crtc_state *crtc_st;
struct komeda_crtc_state *kcrtc_st;
struct komeda_data_flow_cfg dflow;
int err;
- if (!state->crtc || !state->fb)
+ if (!new_plane_state->crtc || !new_plane_state->fb)
return 0;
- crtc_st = drm_atomic_get_crtc_state(state->state, state->crtc);
+ crtc_st = drm_atomic_get_crtc_state(state,
+ new_plane_state->crtc);
if (IS_ERR(crtc_st) || !crtc_st->enable) {
DRM_DEBUG_ATOMIC("Cannot update plane on a disabled CRTC.\n");
return -EINVAL;
@@ -96,7 +97,7 @@ komeda_plane_atomic_check(struct drm_plane *plane,
kcrtc_st = to_kcrtc_st(crtc_st);
- err = komeda_plane_init_data_flow(state, kcrtc_st, &dflow);
+ err = komeda_plane_init_data_flow(new_plane_state, kcrtc_st, &dflow);
if (err)
return err;
@@ -115,7 +116,7 @@ komeda_plane_atomic_check(struct drm_plane *plane,
*/
static void
komeda_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
}
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index a3234bfb0917..7adb065169e9 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -229,12 +229,14 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
};
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
int i;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- u32 src_h = state->src_h >> 16;
+ u32 src_h = new_plane_state->src_h >> 16;
/* only the HDLCD_REG_FB_LINE_COUNT register has a limit */
if (src_h >= HDLCD_MAX_YRES) {
@@ -242,23 +244,27 @@ static int hdlcd_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- for_each_new_crtc_in_state(state->state, crtc, crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, crtc_state,
+ i) {
/* we cannot disable the plane while the CRTC is active */
- if (!state->fb && crtc_state->active)
+ if (!new_plane_state->fb && crtc_state->active)
return -EINVAL;
- return drm_atomic_helper_check_plane_state(state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true);
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
}
return 0;
}
static void hdlcd_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_framebuffer *fb = new_plane_state->fb;
struct hdlcd_drm_private *hdlcd;
u32 dest_h;
dma_addr_t scanout_start;
@@ -266,8 +272,8 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
if (!fb)
return;
- dest_h = drm_rect_height(&plane->state->dst);
- scanout_start = drm_fb_cma_get_gem_addr(fb, plane->state, 0);
+ dest_h = drm_rect_height(&new_plane_state->dst);
+ scanout_start = drm_fb_cma_get_gem_addr(fb, new_plane_state, 0);
hdlcd = plane->dev->dev_private;
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
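
The komeda and hdlcd conversions above, like the malidp and armada ones that follow, are instances of the same tree-wide change: plane helper callbacks now take the full drm_atomic_state and look up their own plane state from it. A minimal sketch of the new calling convention for a hypothetical driver; the scaling limits are placeholders:

#include <linux/err.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>

static int example_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc || !new_state->fb)
		return 0;

	/* The CRTC state comes from the same top-level atomic state. */
	crtc_state = drm_atomic_get_crtc_state(state, new_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	return drm_atomic_helper_check_plane_state(new_state, crtc_state,
						   DRM_PLANE_HELPER_NO_SCALING,
						   DRM_PLANE_HELPER_NO_SCALING,
						   false, true);
}

The atomic_update and atomic_disable callbacks follow the same scheme, using drm_atomic_get_old_plane_state()/drm_atomic_get_new_plane_state() instead of the old single-state argument.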
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index fceda010d65a..d83c7366b348 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -234,6 +234,7 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i;
+ bool fence_cookie = dma_fence_begin_signalling();
pm_runtime_get_sync(drm->dev);
@@ -260,6 +261,8 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
malidp_atomic_commit_hw_done(state);
+ dma_fence_end_signalling(fence_cookie);
+
pm_runtime_put(drm->dev);
drm_atomic_helper_cleanup_planes(drm, state);
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 7d0e7b031e44..f5847a79dd7e 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -151,11 +151,8 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE,
fb->format->format, !!fb->modifier);
if (mw_state->format == MALIDP_INVALID_FORMAT_ID) {
- struct drm_format_name_buf format_name;
-
- DRM_DEBUG_KMS("Invalid pixel format %s\n",
- drm_get_format_name(fb->format->format,
- &format_name));
+ DRM_DEBUG_KMS("Invalid pixel format %p4cc\n",
+ &fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 351a85088d0e..ddbba67f0283 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -502,20 +502,22 @@ static void malidp_de_prefetch_settings(struct malidp_plane *mp,
}
static int malidp_de_plane_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct malidp_plane *mp = to_malidp_plane(plane);
- struct malidp_plane_state *ms = to_malidp_plane_state(state);
- bool rotated = state->rotation & MALIDP_ROTATED_MASK;
+ struct malidp_plane_state *ms = to_malidp_plane_state(new_plane_state);
+ bool rotated = new_plane_state->rotation & MALIDP_ROTATED_MASK;
struct drm_framebuffer *fb;
- u16 pixel_alpha = state->pixel_blend_mode;
+ u16 pixel_alpha = new_plane_state->pixel_blend_mode;
int i, ret;
unsigned int block_w, block_h;
- if (!state->crtc || WARN_ON(!state->fb))
+ if (!new_plane_state->crtc || WARN_ON(!new_plane_state->fb))
return 0;
- fb = state->fb;
+ fb = new_plane_state->fb;
ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
mp->layer->id, fb->format->format,
@@ -541,15 +543,15 @@ static int malidp_de_plane_check(struct drm_plane *plane,
DRM_DEBUG_KMS("Buffer width/height needs to be a multiple of tile sizes");
return -EINVAL;
}
- if ((state->src_x >> 16) % block_w || (state->src_y >> 16) % block_h) {
+ if ((new_plane_state->src_x >> 16) % block_w || (new_plane_state->src_y >> 16) % block_h) {
DRM_DEBUG_KMS("Plane src_x/src_y needs to be a multiple of tile sizes");
return -EINVAL;
}
- if ((state->crtc_w > mp->hwdev->max_line_size) ||
- (state->crtc_h > mp->hwdev->max_line_size) ||
- (state->crtc_w < mp->hwdev->min_line_size) ||
- (state->crtc_h < mp->hwdev->min_line_size))
+ if ((new_plane_state->crtc_w > mp->hwdev->max_line_size) ||
+ (new_plane_state->crtc_h > mp->hwdev->max_line_size) ||
+ (new_plane_state->crtc_w < mp->hwdev->min_line_size) ||
+ (new_plane_state->crtc_h < mp->hwdev->min_line_size))
return -EINVAL;
/*
@@ -559,15 +561,15 @@ static int malidp_de_plane_check(struct drm_plane *plane,
*/
if (ms->n_planes == 3 &&
!(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
- (state->fb->pitches[1] != state->fb->pitches[2]))
+ (new_plane_state->fb->pitches[1] != new_plane_state->fb->pitches[2]))
return -EINVAL;
- ret = malidp_se_check_scaling(mp, state);
+ ret = malidp_se_check_scaling(mp, new_plane_state);
if (ret)
return ret;
/* validate the rotation constraints for each layer */
- if (state->rotation != DRM_MODE_ROTATE_0) {
+ if (new_plane_state->rotation != DRM_MODE_ROTATE_0) {
if (mp->layer->rot == ROTATE_NONE)
return -EINVAL;
if ((mp->layer->rot == ROTATE_COMPRESSED) && !(fb->modifier))
@@ -588,11 +590,11 @@ static int malidp_de_plane_check(struct drm_plane *plane,
}
ms->rotmem_size = 0;
- if (state->rotation & MALIDP_ROTATED_MASK) {
+ if (new_plane_state->rotation & MALIDP_ROTATED_MASK) {
int val;
- val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w,
- state->crtc_h,
+ val = mp->hwdev->hw->rotmem_required(mp->hwdev, new_plane_state->crtc_w,
+ new_plane_state->crtc_h,
fb->format->format,
!!(fb->modifier));
if (val < 0)
@@ -602,7 +604,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
}
/* HW can't support plane + pixel blending */
- if ((state->alpha != DRM_BLEND_ALPHA_OPAQUE) &&
+ if ((new_plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE) &&
(pixel_alpha != DRM_MODE_BLEND_PIXEL_NONE) &&
fb->format->has_alpha)
return -EINVAL;
@@ -789,13 +791,16 @@ static void malidp_de_set_plane_afbc(struct drm_plane *plane)
}
static void malidp_de_plane_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct malidp_plane *mp;
struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
- struct drm_plane_state *state = plane->state;
- u16 pixel_alpha = state->pixel_blend_mode;
- u8 plane_alpha = state->alpha >> 8;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ u16 pixel_alpha = new_state->pixel_blend_mode;
+ u8 plane_alpha = new_state->alpha >> 8;
u32 src_w, src_h, dest_w, dest_h, val;
int i;
struct drm_framebuffer *fb = plane->state->fb;
@@ -811,12 +816,12 @@ static void malidp_de_plane_update(struct drm_plane *plane,
src_h = fb->height;
} else {
/* convert src values from Q16 fixed point to integer */
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
+ src_w = new_state->src_w >> 16;
+ src_h = new_state->src_h >> 16;
}
- dest_w = state->crtc_w;
- dest_h = state->crtc_h;
+ dest_w = new_state->crtc_w;
+ dest_h = new_state->crtc_h;
val = malidp_hw_read(mp->hwdev, mp->layer->base);
val = (val & ~LAYER_FORMAT_MASK) | ms->format;
@@ -828,7 +833,7 @@ static void malidp_de_plane_update(struct drm_plane *plane,
malidp_de_set_mmu_control(mp, ms);
malidp_de_set_plane_pitches(mp, ms->n_planes,
- state->fb->pitches);
+ new_state->fb->pitches);
if ((plane->state->color_encoding != old_state->color_encoding) ||
(plane->state->color_range != old_state->color_range))
@@ -841,8 +846,8 @@ static void malidp_de_plane_update(struct drm_plane *plane,
malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h),
mp->layer->base + MALIDP_LAYER_COMP_SIZE);
- malidp_hw_write(mp->hwdev, LAYER_H_VAL(state->crtc_x) |
- LAYER_V_VAL(state->crtc_y),
+ malidp_hw_write(mp->hwdev, LAYER_H_VAL(new_state->crtc_x) |
+ LAYER_V_VAL(new_state->crtc_y),
mp->layer->base + MALIDP_LAYER_OFFSET);
if (mp->layer->id == DE_SMART) {
@@ -864,19 +869,19 @@ static void malidp_de_plane_update(struct drm_plane *plane,
val &= ~LAYER_ROT_MASK;
/* setup the rotation and axis flip bits */
- if (state->rotation & DRM_MODE_ROTATE_MASK)
+ if (new_state->rotation & DRM_MODE_ROTATE_MASK)
val |= ilog2(plane->state->rotation & DRM_MODE_ROTATE_MASK) <<
LAYER_ROT_OFFSET;
- if (state->rotation & DRM_MODE_REFLECT_X)
+ if (new_state->rotation & DRM_MODE_REFLECT_X)
val |= LAYER_H_FLIP;
- if (state->rotation & DRM_MODE_REFLECT_Y)
+ if (new_state->rotation & DRM_MODE_REFLECT_Y)
val |= LAYER_V_FLIP;
val &= ~(LAYER_COMP_MASK | LAYER_PMUL_ENABLE | LAYER_ALPHA(0xff));
- if (state->alpha != DRM_BLEND_ALPHA_OPAQUE) {
+ if (new_state->alpha != DRM_BLEND_ALPHA_OPAQUE) {
val |= LAYER_COMP_PLANE;
- } else if (state->fb->format->has_alpha) {
+ } else if (new_state->fb->format->has_alpha) {
/* We only care about blend mode if the format has alpha */
switch (pixel_alpha) {
case DRM_MODE_BLEND_PREMULTI:
@@ -890,9 +895,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
val |= LAYER_ALPHA(plane_alpha);
val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK);
- if (state->crtc) {
+ if (new_state->crtc) {
struct malidp_crtc_state *m =
- to_malidp_crtc_state(state->crtc->state);
+ to_malidp_crtc_state(new_state->crtc->state);
if (m->scaler_config.scale_enable &&
m->scaler_config.plane_src_id == mp->layer->id)
@@ -907,7 +912,7 @@ static void malidp_de_plane_update(struct drm_plane *plane,
}
static void malidp_de_plane_disable(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
struct malidp_plane *mp = to_malidp_plane(plane);
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 6346b890279a..d3e3e5fdc390 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -66,9 +66,12 @@ static inline u32 armada_csc(struct drm_plane_state *state)
/* === Plane support === */
static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct armada_crtc *dcrtc;
struct armada_regs *regs;
unsigned int idx;
@@ -76,62 +79,64 @@ static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
- if (!state->fb || WARN_ON(!state->crtc))
+ if (!new_state->fb || WARN_ON(!new_state->crtc))
return;
DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
plane->base.id, plane->name,
- state->crtc->base.id, state->crtc->name,
- state->fb->base.id,
- old_state->visible, state->visible);
+ new_state->crtc->base.id, new_state->crtc->name,
+ new_state->fb->base.id,
+ old_state->visible, new_state->visible);
- dcrtc = drm_to_armada_crtc(state->crtc);
+ dcrtc = drm_to_armada_crtc(new_state->crtc);
regs = dcrtc->regs + dcrtc->regs_idx;
idx = 0;
- if (!old_state->visible && state->visible)
+ if (!old_state->visible && new_state->visible)
armada_reg_queue_mod(regs, idx,
0, CFG_PDWN16x66 | CFG_PDWN32x66,
LCD_SPU_SRAM_PARA1);
- val = armada_src_hw(state);
+ val = armada_src_hw(new_state);
if (armada_src_hw(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_HPXL_VLN);
- val = armada_dst_yx(state);
+ val = armada_dst_yx(new_state);
if (armada_dst_yx(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_OVSA_HPXL_VLN);
- val = armada_dst_hw(state);
+ val = armada_dst_hw(new_state);
if (armada_dst_hw(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_DZM_HPXL_VLN);
/* FIXME: overlay on an interlaced display */
- if (old_state->src.x1 != state->src.x1 ||
- old_state->src.y1 != state->src.y1 ||
- old_state->fb != state->fb ||
- state->crtc->state->mode_changed) {
+ if (old_state->src.x1 != new_state->src.x1 ||
+ old_state->src.y1 != new_state->src.y1 ||
+ old_state->fb != new_state->fb ||
+ new_state->crtc->state->mode_changed) {
const struct drm_format_info *format;
u16 src_x;
- armada_reg_queue_set(regs, idx, armada_addr(state, 0, 0),
+ armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 0),
LCD_SPU_DMA_START_ADDR_Y0);
- armada_reg_queue_set(regs, idx, armada_addr(state, 0, 1),
+ armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 1),
LCD_SPU_DMA_START_ADDR_U0);
- armada_reg_queue_set(regs, idx, armada_addr(state, 0, 2),
+ armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 2),
LCD_SPU_DMA_START_ADDR_V0);
- armada_reg_queue_set(regs, idx, armada_addr(state, 1, 0),
+ armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 0),
LCD_SPU_DMA_START_ADDR_Y1);
- armada_reg_queue_set(regs, idx, armada_addr(state, 1, 1),
+ armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 1),
LCD_SPU_DMA_START_ADDR_U1);
- armada_reg_queue_set(regs, idx, armada_addr(state, 1, 2),
+ armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 2),
LCD_SPU_DMA_START_ADDR_V1);
- val = armada_pitch(state, 0) << 16 | armada_pitch(state, 0);
+ val = armada_pitch(new_state, 0) << 16 | armada_pitch(new_state,
+ 0);
armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_YC);
- val = armada_pitch(state, 1) << 16 | armada_pitch(state, 2);
+ val = armada_pitch(new_state, 1) << 16 | armada_pitch(new_state,
+ 2);
armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_UV);
- cfg = CFG_DMA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) |
- CFG_DMA_MOD(drm_fb_to_armada_fb(state->fb)->mod) |
+ cfg = CFG_DMA_FMT(drm_fb_to_armada_fb(new_state->fb)->fmt) |
+ CFG_DMA_MOD(drm_fb_to_armada_fb(new_state->fb)->mod) |
CFG_CBSH_ENA;
- if (state->visible)
+ if (new_state->visible)
cfg |= CFG_DMA_ENA;
/*
@@ -139,28 +144,28 @@ static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
* U/V planes to swap. Compensate for it by also toggling
* the UV swap.
*/
- format = state->fb->format;
- src_x = state->src.x1 >> 16;
+ format = new_state->fb->format;
+ src_x = new_state->src.x1 >> 16;
if (format->num_planes == 1 && src_x & (format->hsub - 1))
cfg ^= CFG_DMA_MOD(CFG_SWAPUV);
- if (to_armada_plane_state(state)->interlace)
+ if (to_armada_plane_state(new_state)->interlace)
cfg |= CFG_DMA_FTOGGLE;
cfg_mask = CFG_CBSH_ENA | CFG_DMAFORMAT |
CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV |
CFG_SWAPYU | CFG_YUV2RGB) |
CFG_DMA_FTOGGLE | CFG_DMA_TSTMODE |
CFG_DMA_ENA;
- } else if (old_state->visible != state->visible) {
- cfg = state->visible ? CFG_DMA_ENA : 0;
+ } else if (old_state->visible != new_state->visible) {
+ cfg = new_state->visible ? CFG_DMA_ENA : 0;
cfg_mask = CFG_DMA_ENA;
} else {
cfg = cfg_mask = 0;
}
- if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) ||
- drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) {
+ if (drm_rect_width(&old_state->src) != drm_rect_width(&new_state->src) ||
+ drm_rect_width(&old_state->dst) != drm_rect_width(&new_state->dst)) {
cfg_mask |= CFG_DMA_HSMOOTH;
- if (drm_rect_width(&state->src) >> 16 !=
- drm_rect_width(&state->dst))
+ if (drm_rect_width(&new_state->src) >> 16 !=
+ drm_rect_width(&new_state->dst))
cfg |= CFG_DMA_HSMOOTH;
}
@@ -168,41 +173,41 @@ static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
armada_reg_queue_mod(regs, idx, cfg, cfg_mask,
LCD_SPU_DMA_CTRL0);
- val = armada_spu_contrast(state);
- if ((!old_state->visible && state->visible) ||
+ val = armada_spu_contrast(new_state);
+ if ((!old_state->visible && new_state->visible) ||
armada_spu_contrast(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_CONTRAST);
- val = armada_spu_saturation(state);
- if ((!old_state->visible && state->visible) ||
+ val = armada_spu_saturation(new_state);
+ if ((!old_state->visible && new_state->visible) ||
armada_spu_saturation(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_SATURATION);
- if (!old_state->visible && state->visible)
+ if (!old_state->visible && new_state->visible)
armada_reg_queue_set(regs, idx, 0x00002000, LCD_SPU_CBSH_HUE);
- val = armada_csc(state);
- if ((!old_state->visible && state->visible) ||
+ val = armada_csc(new_state);
+ if ((!old_state->visible && new_state->visible) ||
armada_csc(old_state) != val)
armada_reg_queue_mod(regs, idx, val, CFG_CSC_MASK,
LCD_SPU_IOPAD_CONTROL);
- val = drm_to_overlay_state(state)->colorkey_yr;
- if ((!old_state->visible && state->visible) ||
+ val = drm_to_overlay_state(new_state)->colorkey_yr;
+ if ((!old_state->visible && new_state->visible) ||
drm_to_overlay_state(old_state)->colorkey_yr != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_Y);
- val = drm_to_overlay_state(state)->colorkey_ug;
- if ((!old_state->visible && state->visible) ||
+ val = drm_to_overlay_state(new_state)->colorkey_ug;
+ if ((!old_state->visible && new_state->visible) ||
drm_to_overlay_state(old_state)->colorkey_ug != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_U);
- val = drm_to_overlay_state(state)->colorkey_vb;
- if ((!old_state->visible && state->visible) ||
+ val = drm_to_overlay_state(new_state)->colorkey_vb;
+ if ((!old_state->visible && new_state->visible) ||
drm_to_overlay_state(old_state)->colorkey_vb != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_V);
- val = drm_to_overlay_state(state)->colorkey_mode;
- if ((!old_state->visible && state->visible) ||
+ val = drm_to_overlay_state(new_state)->colorkey_mode;
+ if ((!old_state->visible && new_state->visible) ||
drm_to_overlay_state(old_state)->colorkey_mode != val)
armada_reg_queue_mod(regs, idx, val, CFG_CKMODE_MASK |
CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
LCD_SPU_DMA_CTRL1);
- val = drm_to_overlay_state(state)->colorkey_enable;
- if (((!old_state->visible && state->visible) ||
+ val = drm_to_overlay_state(new_state)->colorkey_enable;
+ if (((!old_state->visible && new_state->visible) ||
drm_to_overlay_state(old_state)->colorkey_enable != val) &&
dcrtc->variant->has_spu_adv_reg)
armada_reg_queue_mod(regs, idx, val, ADV_GRACOLORKEY |
@@ -212,8 +217,10 @@ static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
}
static void armada_drm_overlay_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct armada_crtc *dcrtc;
struct armada_regs *regs;
unsigned int idx = 0;
diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c
index e7cc2b343bcb..40209e49f34a 100644
--- a/drivers/gpu/drm/armada/armada_plane.c
+++ b/drivers/gpu/drm/armada/armada_plane.c
@@ -106,59 +106,67 @@ void armada_drm_plane_cleanup_fb(struct drm_plane *plane,
}
int armada_drm_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct armada_plane_state *st = to_armada_plane_state(state);
- struct drm_crtc *crtc = state->crtc;
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct armada_plane_state *st = to_armada_plane_state(new_plane_state);
+ struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
bool interlace;
int ret;
- if (!state->fb || WARN_ON(!state->crtc)) {
- state->visible = false;
+ if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc)) {
+ new_plane_state->visible = false;
return 0;
}
- if (state->state)
- crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ if (state)
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ crtc);
else
crtc_state = crtc->state;
- ret = drm_atomic_helper_check_plane_state(state, crtc_state, 0,
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ 0,
INT_MAX, true, false);
if (ret)
return ret;
interlace = crtc_state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE;
if (interlace) {
- if ((state->dst.y1 | state->dst.y2) & 1)
+ if ((new_plane_state->dst.y1 | new_plane_state->dst.y2) & 1)
return -EINVAL;
- st->src_hw = drm_rect_height(&state->src) >> 17;
- st->dst_yx = state->dst.y1 >> 1;
- st->dst_hw = drm_rect_height(&state->dst) >> 1;
+ st->src_hw = drm_rect_height(&new_plane_state->src) >> 17;
+ st->dst_yx = new_plane_state->dst.y1 >> 1;
+ st->dst_hw = drm_rect_height(&new_plane_state->dst) >> 1;
} else {
- st->src_hw = drm_rect_height(&state->src) >> 16;
- st->dst_yx = state->dst.y1;
- st->dst_hw = drm_rect_height(&state->dst);
+ st->src_hw = drm_rect_height(&new_plane_state->src) >> 16;
+ st->dst_yx = new_plane_state->dst.y1;
+ st->dst_hw = drm_rect_height(&new_plane_state->dst);
}
st->src_hw <<= 16;
- st->src_hw |= drm_rect_width(&state->src) >> 16;
+ st->src_hw |= drm_rect_width(&new_plane_state->src) >> 16;
st->dst_yx <<= 16;
- st->dst_yx |= state->dst.x1 & 0x0000ffff;
+ st->dst_yx |= new_plane_state->dst.x1 & 0x0000ffff;
st->dst_hw <<= 16;
- st->dst_hw |= drm_rect_width(&state->dst) & 0x0000ffff;
+ st->dst_hw |= drm_rect_width(&new_plane_state->dst) & 0x0000ffff;
- armada_drm_plane_calc(state, st->addrs, st->pitches, interlace);
+ armada_drm_plane_calc(new_plane_state, st->addrs, st->pitches,
+ interlace);
st->interlace = interlace;
return 0;
}
static void armada_drm_primary_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct armada_crtc *dcrtc;
struct armada_regs *regs;
u32 cfg, cfg_mask, val;
@@ -166,71 +174,72 @@ static void armada_drm_primary_plane_atomic_update(struct drm_plane *plane,
DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
- if (!state->fb || WARN_ON(!state->crtc))
+ if (!new_state->fb || WARN_ON(!new_state->crtc))
return;
DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
plane->base.id, plane->name,
- state->crtc->base.id, state->crtc->name,
- state->fb->base.id,
- old_state->visible, state->visible);
+ new_state->crtc->base.id, new_state->crtc->name,
+ new_state->fb->base.id,
+ old_state->visible, new_state->visible);
- dcrtc = drm_to_armada_crtc(state->crtc);
+ dcrtc = drm_to_armada_crtc(new_state->crtc);
regs = dcrtc->regs + dcrtc->regs_idx;
idx = 0;
- if (!old_state->visible && state->visible) {
+ if (!old_state->visible && new_state->visible) {
val = CFG_PDWN64x66;
- if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420)
+ if (drm_fb_to_armada_fb(new_state->fb)->fmt > CFG_420)
val |= CFG_PDWN256x24;
armada_reg_queue_mod(regs, idx, 0, val, LCD_SPU_SRAM_PARA1);
}
- val = armada_src_hw(state);
+ val = armada_src_hw(new_state);
if (armada_src_hw(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_HPXL_VLN);
- val = armada_dst_yx(state);
+ val = armada_dst_yx(new_state);
if (armada_dst_yx(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_OVSA_HPXL_VLN);
- val = armada_dst_hw(state);
+ val = armada_dst_hw(new_state);
if (armada_dst_hw(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_GZM_HPXL_VLN);
- if (old_state->src.x1 != state->src.x1 ||
- old_state->src.y1 != state->src.y1 ||
- old_state->fb != state->fb ||
- state->crtc->state->mode_changed) {
- armada_reg_queue_set(regs, idx, armada_addr(state, 0, 0),
+ if (old_state->src.x1 != new_state->src.x1 ||
+ old_state->src.y1 != new_state->src.y1 ||
+ old_state->fb != new_state->fb ||
+ new_state->crtc->state->mode_changed) {
+ armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 0),
LCD_CFG_GRA_START_ADDR0);
- armada_reg_queue_set(regs, idx, armada_addr(state, 1, 0),
+ armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 0),
LCD_CFG_GRA_START_ADDR1);
- armada_reg_queue_mod(regs, idx, armada_pitch(state, 0), 0xffff,
+ armada_reg_queue_mod(regs, idx, armada_pitch(new_state, 0),
+ 0xffff,
LCD_CFG_GRA_PITCH);
}
- if (old_state->fb != state->fb ||
- state->crtc->state->mode_changed) {
- cfg = CFG_GRA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) |
- CFG_GRA_MOD(drm_fb_to_armada_fb(state->fb)->mod);
- if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420)
+ if (old_state->fb != new_state->fb ||
+ new_state->crtc->state->mode_changed) {
+ cfg = CFG_GRA_FMT(drm_fb_to_armada_fb(new_state->fb)->fmt) |
+ CFG_GRA_MOD(drm_fb_to_armada_fb(new_state->fb)->mod);
+ if (drm_fb_to_armada_fb(new_state->fb)->fmt > CFG_420)
cfg |= CFG_PALETTE_ENA;
- if (state->visible)
+ if (new_state->visible)
cfg |= CFG_GRA_ENA;
- if (to_armada_plane_state(state)->interlace)
+ if (to_armada_plane_state(new_state)->interlace)
cfg |= CFG_GRA_FTOGGLE;
cfg_mask = CFG_GRAFORMAT |
CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
CFG_SWAPYU | CFG_YUV2RGB) |
CFG_PALETTE_ENA | CFG_GRA_FTOGGLE |
CFG_GRA_ENA;
- } else if (old_state->visible != state->visible) {
- cfg = state->visible ? CFG_GRA_ENA : 0;
+ } else if (old_state->visible != new_state->visible) {
+ cfg = new_state->visible ? CFG_GRA_ENA : 0;
cfg_mask = CFG_GRA_ENA;
} else {
cfg = cfg_mask = 0;
}
- if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) ||
- drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) {
+ if (drm_rect_width(&old_state->src) != drm_rect_width(&new_state->src) ||
+ drm_rect_width(&old_state->dst) != drm_rect_width(&new_state->dst)) {
cfg_mask |= CFG_GRA_HSMOOTH;
- if (drm_rect_width(&state->src) >> 16 !=
- drm_rect_width(&state->dst))
+ if (drm_rect_width(&new_state->src) >> 16 !=
+ drm_rect_width(&new_state->dst))
cfg |= CFG_GRA_HSMOOTH;
}
@@ -242,8 +251,10 @@ static void armada_drm_primary_plane_atomic_update(struct drm_plane *plane,
}
static void armada_drm_primary_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct armada_crtc *dcrtc;
struct armada_regs *regs;
unsigned int idx = 0;
diff --git a/drivers/gpu/drm/armada/armada_plane.h b/drivers/gpu/drm/armada/armada_plane.h
index 2707ec781941..51dab8d8da22 100644
--- a/drivers/gpu/drm/armada/armada_plane.h
+++ b/drivers/gpu/drm/armada/armada_plane.h
@@ -26,7 +26,7 @@ int armada_drm_plane_prepare_fb(struct drm_plane *plane,
void armada_drm_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
int armada_drm_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state);
+ struct drm_atomic_state *state);
void armada_plane_reset(struct drm_plane *plane);
struct drm_plane_state *armada_plane_duplicate_state(struct drm_plane *plane);
void armada_plane_destroy_state(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx.h b/drivers/gpu/drm/aspeed/aspeed_gfx.h
index f1e7e56abc02..96501152bafa 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx.h
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx.h
@@ -11,6 +11,11 @@ struct aspeed_gfx {
struct reset_control *rst;
struct regmap *scu;
+ u32 dac_reg;
+ u32 vga_scratch_reg;
+ u32 throd_val;
+ u32 scan_line_max;
+
struct drm_simple_display_pipe pipe;
struct drm_connector connector;
};
@@ -100,6 +105,3 @@ int aspeed_gfx_create_output(struct drm_device *drm);
/* CRT_THROD */
#define CRT_THROD_LOW(x) (x)
#define CRT_THROD_HIGH(x) ((x) << 8)
-
-/* Default Threshold Seting */
-#define G5_CRT_THROD_VAL (CRT_THROD_LOW(0x24) | CRT_THROD_HIGH(0x3C))
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
index e54686c31a90..098f96d4d50d 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
@@ -9,8 +9,8 @@
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
@@ -59,8 +59,8 @@ static void aspeed_gfx_enable_controller(struct aspeed_gfx *priv)
u32 ctrl1 = readl(priv->base + CRT_CTRL1);
u32 ctrl2 = readl(priv->base + CRT_CTRL2);
- /* SCU2C: set DAC source for display output to Graphics CRT (GFX) */
- regmap_update_bits(priv->scu, 0x2c, BIT(16), BIT(16));
+ /* Set DAC source for display output to Graphics CRT (GFX) */
+ regmap_update_bits(priv->scu, priv->dac_reg, BIT(16), BIT(16));
writel(ctrl1 | CRT_CTRL_EN, priv->base + CRT_CTRL1);
writel(ctrl2 | CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2);
@@ -74,7 +74,7 @@ static void aspeed_gfx_disable_controller(struct aspeed_gfx *priv)
writel(ctrl1 & ~CRT_CTRL_EN, priv->base + CRT_CTRL1);
writel(ctrl2 & ~CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2);
- regmap_update_bits(priv->scu, 0x2c, BIT(16), 0);
+ regmap_update_bits(priv->scu, priv->dac_reg, BIT(16), 0);
}
static void aspeed_gfx_crtc_mode_set_nofb(struct aspeed_gfx *priv)
@@ -127,7 +127,8 @@ static void aspeed_gfx_crtc_mode_set_nofb(struct aspeed_gfx *priv)
* Terminal Count: memory size of one scan line
*/
d_offset = m->hdisplay * bpp / 8;
- t_count = (m->hdisplay * bpp + 127) / 128;
+ t_count = DIV_ROUND_UP(m->hdisplay * bpp, priv->scan_line_max);
+
writel(CRT_DISP_OFFSET(d_offset) | CRT_TERM_COUNT(t_count),
priv->base + CRT_OFFSET);
@@ -135,7 +136,7 @@ static void aspeed_gfx_crtc_mode_set_nofb(struct aspeed_gfx *priv)
* Threshold: FIFO thresholds of refill and stop (16 byte chunks
* per line, rounded up)
*/
- writel(G5_CRT_THROD_VAL, priv->base + CRT_THROD);
+ writel(priv->throd_val, priv->base + CRT_THROD);
}
static void aspeed_gfx_pipe_enable(struct drm_simple_display_pipe *pipe,
@@ -219,7 +220,7 @@ static const struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = {
.enable = aspeed_gfx_pipe_enable,
.disable = aspeed_gfx_pipe_disable,
.update = aspeed_gfx_pipe_update,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
.enable_vblank = aspeed_gfx_enable_vblank,
.disable_vblank = aspeed_gfx_disable_vblank,
};
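
The aspeed hunk above swaps the simple-display-pipe prepare_fb helper for its counterpart from <drm/drm_gem_atomic_helper.h>, which takes the implicit fence from the framebuffer's GEM object and stores it in the plane state. A minimal sketch, with the other pipe callbacks omitted:

#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_simple_kms_helper.h>

static const struct drm_simple_display_pipe_funcs example_pipe_funcs = {
	/* .enable, .disable, .update etc. omitted for brevity. */
	.prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
};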
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 457ec04950f7..b53fee6f1c17 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -7,6 +7,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -57,6 +58,34 @@
* which is available under NDA from ASPEED.
*/
+struct aspeed_gfx_config {
+ u32 dac_reg; /* DAC register in SCU */
+ u32 vga_scratch_reg; /* VGA scratch register in SCU */
+ u32 throd_val; /* Default Threshold Setting */
+ u32 scan_line_max; /* Max memory size of one scan line */
+};
+
+static const struct aspeed_gfx_config ast2400_config = {
+ .dac_reg = 0x2c,
+ .vga_scratch_reg = 0x50,
+ .throd_val = CRT_THROD_LOW(0x1e) | CRT_THROD_HIGH(0x12),
+ .scan_line_max = 64,
+};
+
+static const struct aspeed_gfx_config ast2500_config = {
+ .dac_reg = 0x2c,
+ .vga_scratch_reg = 0x50,
+ .throd_val = CRT_THROD_LOW(0x24) | CRT_THROD_HIGH(0x3c),
+ .scan_line_max = 128,
+};
+
+static const struct of_device_id aspeed_gfx_match[] = {
+ { .compatible = "aspeed,ast2400-gfx", .data = &ast2400_config },
+ { .compatible = "aspeed,ast2500-gfx", .data = &ast2500_config },
+ { },
+};
+MODULE_DEVICE_TABLE(of, aspeed_gfx_match);
+
static const struct drm_mode_config_funcs aspeed_gfx_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
@@ -97,12 +126,13 @@ static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
return IRQ_NONE;
}
-
-
static int aspeed_gfx_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct aspeed_gfx *priv = to_aspeed_gfx(drm);
+ struct device_node *np = pdev->dev.of_node;
+ const struct aspeed_gfx_config *config;
+ const struct of_device_id *match;
struct resource *res;
int ret;
@@ -111,10 +141,23 @@ static int aspeed_gfx_load(struct drm_device *drm)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu");
+ match = of_match_device(aspeed_gfx_match, &pdev->dev);
+ if (!match)
+ return -EINVAL;
+ config = match->data;
+
+ priv->dac_reg = config->dac_reg;
+ priv->vga_scratch_reg = config->vga_scratch_reg;
+ priv->throd_val = config->throd_val;
+ priv->scan_line_max = config->scan_line_max;
+
+ priv->scu = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR(priv->scu)) {
- dev_err(&pdev->dev, "failed to find SCU regmap\n");
- return PTR_ERR(priv->scu);
+ priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu");
+ if (IS_ERR(priv->scu)) {
+ dev_err(&pdev->dev, "failed to find SCU regmap\n");
+ return PTR_ERR(priv->scu);
+ }
}
ret = of_reserved_mem_device_init(drm->dev);
@@ -202,14 +245,6 @@ static const struct drm_driver aspeed_gfx_driver = {
.minor = 0,
};
-static const struct of_device_id aspeed_gfx_match[] = {
- { .compatible = "aspeed,ast2500-gfx" },
- { }
-};
-
-#define ASPEED_SCU_VGA0 0x50
-#define ASPEED_SCU_MISC_CTRL 0x2c
-
static ssize_t dac_mux_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -224,7 +259,7 @@ static ssize_t dac_mux_store(struct device *dev, struct device_attribute *attr,
if (val > 3)
return -EINVAL;
- rc = regmap_update_bits(priv->scu, ASPEED_SCU_MISC_CTRL, 0x30000, val << 16);
+ rc = regmap_update_bits(priv->scu, priv->dac_reg, 0x30000, val << 16);
if (rc < 0)
return 0;
@@ -237,7 +272,7 @@ static ssize_t dac_mux_show(struct device *dev, struct device_attribute *attr, c
u32 reg;
int rc;
- rc = regmap_read(priv->scu, ASPEED_SCU_MISC_CTRL, &reg);
+ rc = regmap_read(priv->scu, priv->dac_reg, &reg);
if (rc)
return rc;
@@ -252,7 +287,7 @@ vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf)
u32 reg;
int rc;
- rc = regmap_read(priv->scu, ASPEED_SCU_VGA0, &reg);
+ rc = regmap_read(priv->scu, priv->vga_scratch_reg, &reg);
if (rc)
return rc;
@@ -284,7 +319,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev)
if (ret)
return ret;
- dev_set_drvdata(&pdev->dev, priv);
+ platform_set_drvdata(pdev, priv);
ret = sysfs_create_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
if (ret)
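
The aspeed_gfx probe path now carries per-SoC settings in the of_device_id .data pointer instead of hard-coding AST2500 values. A minimal sketch of that lookup, reusing the aspeed_gfx_config and aspeed_gfx_match definitions added above; the helper name is hypothetical:

#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct aspeed_gfx_config *
example_get_config(struct platform_device *pdev)
{
	/* Returns the aspeed_gfx_match entry that matched this device node. */
	const struct of_device_id *match =
		of_match_device(aspeed_gfx_match, &pdev->dev);

	return match ? match->data : NULL;
}

of_device_get_match_data(&pdev->dev) is the usual shorthand for the same lookup.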
diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile
index 2265a8a624dd..438a2d05b115 100644
--- a/drivers/gpu/drm/ast/Makefile
+++ b/drivers/gpu/drm/ast/Makefile
@@ -3,7 +3,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ast-y := ast_cursor.o ast_drv.o ast_main.o ast_mm.o ast_mode.o ast_post.o \
- ast_dp501.o
+ast-y := ast_drv.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o
obj-$(CONFIG_DRM_AST) := ast.o
diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c
deleted file mode 100644
index fac1ee79c372..000000000000
--- a/drivers/gpu/drm/ast/ast_cursor.c
+++ /dev/null
@@ -1,286 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- * Parts based on xf86-video-ast
- * Copyright (c) 2005 ASPEED Technology Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- */
-/*
- * Authors: Dave Airlie <airlied@redhat.com>
- */
-
-#include <drm/drm_gem_vram_helper.h>
-#include <drm/drm_managed.h>
-
-#include "ast_drv.h"
-
-static void ast_cursor_fini(struct ast_private *ast)
-{
- size_t i;
- struct drm_gem_vram_object *gbo;
-
- for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
- gbo = ast->cursor.gbo[i];
- drm_gem_vram_unpin(gbo);
- drm_gem_vram_put(gbo);
- }
-}
-
-static void ast_cursor_release(struct drm_device *dev, void *ptr)
-{
- struct ast_private *ast = to_ast_private(dev);
-
- ast_cursor_fini(ast);
-}
-
-/*
- * Allocate cursor BOs and pin them at the end of VRAM.
- */
-int ast_cursor_init(struct ast_private *ast)
-{
- struct drm_device *dev = &ast->base;
- size_t size, i;
- struct drm_gem_vram_object *gbo;
- int ret;
-
- size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
-
- for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
- gbo = drm_gem_vram_create(dev, size, 0);
- if (IS_ERR(gbo)) {
- ret = PTR_ERR(gbo);
- goto err_drm_gem_vram_put;
- }
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
- DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
- if (ret) {
- drm_gem_vram_put(gbo);
- goto err_drm_gem_vram_put;
- }
- ast->cursor.gbo[i] = gbo;
- }
-
- return drmm_add_action_or_reset(dev, ast_cursor_release, NULL);
-
-err_drm_gem_vram_put:
- while (i) {
- --i;
- gbo = ast->cursor.gbo[i];
- drm_gem_vram_unpin(gbo);
- drm_gem_vram_put(gbo);
- }
- return ret;
-}
-
-static void update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int height)
-{
- union {
- u32 ul;
- u8 b[4];
- } srcdata32[2], data32;
- union {
- u16 us;
- u8 b[2];
- } data16;
- u32 csum = 0;
- s32 alpha_dst_delta, last_alpha_dst_delta;
- u8 __iomem *dstxor;
- const u8 *srcxor;
- int i, j;
- u32 per_pixel_copy, two_pixel_copy;
-
- alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
- last_alpha_dst_delta = alpha_dst_delta - (width << 1);
-
- srcxor = src;
- dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
- per_pixel_copy = width & 1;
- two_pixel_copy = width >> 1;
-
- for (j = 0; j < height; j++) {
- for (i = 0; i < two_pixel_copy; i++) {
- srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
- srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
- data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
- data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
- data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
- data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
-
- writel(data32.ul, dstxor);
- csum += data32.ul;
-
- dstxor += 4;
- srcxor += 8;
-
- }
-
- for (i = 0; i < per_pixel_copy; i++) {
- srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
- data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
- data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
- writew(data16.us, dstxor);
- csum += (u32)data16.us;
-
- dstxor += 2;
- srcxor += 4;
- }
- dstxor += last_alpha_dst_delta;
- }
-
- /* write checksum + signature */
- dst += AST_HWC_SIZE;
- writel(csum, dst);
- writel(width, dst + AST_HWC_SIGNATURE_SizeX);
- writel(height, dst + AST_HWC_SIGNATURE_SizeY);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
- writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
-}
-
-int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb)
-{
- struct drm_device *dev = &ast->base;
- struct drm_gem_vram_object *dst_gbo = ast->cursor.gbo[ast->cursor.next_index];
- struct drm_gem_vram_object *src_gbo = drm_gem_vram_of_gem(fb->obj[0]);
- struct dma_buf_map src_map, dst_map;
- void __iomem *dst;
- void *src;
- int ret;
-
- if (drm_WARN_ON_ONCE(dev, fb->width > AST_MAX_HWC_WIDTH) ||
- drm_WARN_ON_ONCE(dev, fb->height > AST_MAX_HWC_HEIGHT))
- return -EINVAL;
-
- ret = drm_gem_vram_vmap(src_gbo, &src_map);
- if (ret)
- return ret;
- src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
-
- ret = drm_gem_vram_vmap(dst_gbo, &dst_map);
- if (ret)
- goto err_drm_gem_vram_vunmap;
- dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
-
- /* do data transfer to cursor BO */
- update_cursor_image(dst, src, fb->width, fb->height);
-
- drm_gem_vram_vunmap(dst_gbo, &dst_map);
- drm_gem_vram_vunmap(src_gbo, &src_map);
-
- return 0;
-
-err_drm_gem_vram_vunmap:
- drm_gem_vram_vunmap(src_gbo, &src_map);
- return ret;
-}
-
-static void ast_cursor_set_base(struct ast_private *ast, u64 address)
-{
- u8 addr0 = (address >> 3) & 0xff;
- u8 addr1 = (address >> 11) & 0xff;
- u8 addr2 = (address >> 19) & 0xff;
-
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
-}
-
-void ast_cursor_page_flip(struct ast_private *ast)
-{
- struct drm_device *dev = &ast->base;
- struct drm_gem_vram_object *gbo;
- s64 off;
-
- gbo = ast->cursor.gbo[ast->cursor.next_index];
-
- off = drm_gem_vram_offset(gbo);
- if (drm_WARN_ON_ONCE(dev, off < 0))
- return; /* Bug: we didn't pin the cursor HW BO to VRAM. */
-
- ast_cursor_set_base(ast, off);
-
- ++ast->cursor.next_index;
- ast->cursor.next_index %= ARRAY_SIZE(ast->cursor.gbo);
-}
-
-static void ast_cursor_set_location(struct ast_private *ast, u16 x, u16 y,
- u8 x_offset, u8 y_offset)
-{
- u8 x0 = (x & 0x00ff);
- u8 x1 = (x & 0x0f00) >> 8;
- u8 y0 = (y & 0x00ff);
- u8 y1 = (y & 0x0700) >> 8;
-
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, x0);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, x1);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, y0);
- ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1);
-}
-
-void ast_cursor_show(struct ast_private *ast, int x, int y,
- unsigned int offset_x, unsigned int offset_y)
-{
- struct drm_device *dev = &ast->base;
- struct drm_gem_vram_object *gbo = ast->cursor.gbo[ast->cursor.next_index];
- struct dma_buf_map map;
- u8 x_offset, y_offset;
- u8 __iomem *dst;
- u8 __iomem *sig;
- u8 jreg;
- int ret;
-
- ret = drm_gem_vram_vmap(gbo, &map);
- if (drm_WARN_ONCE(dev, ret, "drm_gem_vram_vmap() failed, ret=%d\n", ret))
- return;
- dst = map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
-
- sig = dst + AST_HWC_SIZE;
- writel(x, sig + AST_HWC_SIGNATURE_X);
- writel(y, sig + AST_HWC_SIGNATURE_Y);
-
- drm_gem_vram_vunmap(gbo, &map);
-
- if (x < 0) {
- x_offset = (-x) + offset_x;
- x = 0;
- } else {
- x_offset = offset_x;
- }
- if (y < 0) {
- y_offset = (-y) + offset_y;
- y = 0;
- } else {
- y_offset = offset_y;
- }
-
- ast_cursor_set_location(ast, x, y, x_offset, y_offset);
-
- /* dummy write to fire HWC */
- jreg = 0x02 |
- 0x01; /* enable ARGB4444 cursor */
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
-}
-
-void ast_cursor_hide(struct ast_private *ast)
-{
- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
-}
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index ea8164e7a6dc..01837bea18c2 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
@@ -138,6 +139,7 @@ static void ast_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
+ drm_atomic_helper_shutdown(dev);
}
static int ast_drm_freeze(struct drm_device *dev)
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index f871fc36c2f7..e82ab8628770 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -81,6 +81,9 @@ enum ast_tx_chip {
#define AST_DRAM_4Gx16 7
#define AST_DRAM_8Gx16 8
+/*
+ * Cursor plane
+ */
#define AST_MAX_HWC_WIDTH 64
#define AST_MAX_HWC_HEIGHT 64
@@ -99,6 +102,28 @@ enum ast_tx_chip {
#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
+struct ast_cursor_plane {
+ struct drm_plane base;
+
+ struct {
+ struct drm_gem_vram_object *gbo;
+ struct dma_buf_map map;
+ u64 off;
+ } hwc[AST_DEFAULT_HWC_NUM];
+
+ unsigned int next_hwc_index;
+};
+
+static inline struct ast_cursor_plane *
+to_ast_cursor_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct ast_cursor_plane, base);
+}
+
+/*
+ * Connector with i2c channel
+ */
+
struct ast_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
@@ -116,6 +141,10 @@ to_ast_connector(struct drm_connector *connector)
return container_of(connector, struct ast_connector, base);
}
+/*
+ * Device
+ */
+
struct ast_private {
struct drm_device base;
@@ -130,13 +159,8 @@ struct ast_private {
int fb_mtrr;
- struct {
- struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM];
- unsigned int next_index;
- } cursor;
-
struct drm_plane primary_plane;
- struct drm_plane cursor_plane;
+ struct ast_cursor_plane cursor_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct ast_connector connector;
@@ -179,6 +203,9 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
#define AST_IO_VGAIR1_VREFRESH BIT(3)
+#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
+#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
+
#define __ast_read(x) \
static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
u##x val = 0;\
@@ -314,12 +341,4 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
u8 ast_get_dp501_max_clk(struct drm_device *dev);
void ast_init_3rdtx(struct drm_device *dev);
-/* ast_cursor.c */
-int ast_cursor_init(struct ast_private *ast);
-int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb);
-void ast_cursor_page_flip(struct ast_private *ast);
-void ast_cursor_show(struct ast_private *ast, int x, int y,
- unsigned int offset_x, unsigned int offset_y);
-void ast_cursor_hide(struct ast_private *ast);
-
#endif
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 988b270fea5e..36d9575aa27b 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -37,6 +37,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_plane_helper.h>
@@ -535,48 +536,54 @@ static const uint32_t ast_primary_plane_formats[] = {
};
static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state;
struct ast_crtc_state *ast_crtc_state;
int ret;
- if (!state->crtc)
+ if (!new_plane_state->crtc)
return 0;
- crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
- ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, true);
if (ret)
return ret;
- if (!state->visible)
+ if (!new_plane_state->visible)
return 0;
ast_crtc_state = to_ast_crtc_state(crtc_state);
- ast_crtc_state->format = state->fb->format;
+ ast_crtc_state->format = new_plane_state->fb->format;
return 0;
}
static void
ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct drm_device *dev = plane->dev;
struct ast_private *ast = to_ast_private(dev);
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_gem_vram_object *gbo;
s64 gpu_addr;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *fb = new_state->fb;
struct drm_framebuffer *old_fb = old_state->fb;
if (!old_fb || (fb->format != old_fb->format)) {
- struct drm_crtc_state *crtc_state = state->crtc->state;
+ struct drm_crtc_state *crtc_state = new_state->crtc->state;
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
@@ -597,7 +604,7 @@ ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
static void
ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct ast_private *ast = to_ast_private(plane->dev);
@@ -621,55 +628,161 @@ static const struct drm_plane_funcs ast_primary_plane_funcs = {
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
+static int ast_primary_plane_init(struct ast_private *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct drm_plane *primary_plane = &ast->primary_plane;
+ int ret;
+
+ ret = drm_universal_plane_init(dev, primary_plane, 0x01,
+ &ast_primary_plane_funcs,
+ ast_primary_plane_formats,
+ ARRAY_SIZE(ast_primary_plane_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ return ret;
+ }
+ drm_plane_helper_add(primary_plane, &ast_primary_plane_helper_funcs);
+
+ return 0;
+}
+
/*
* Cursor plane
*/
-static const uint32_t ast_cursor_plane_formats[] = {
- DRM_FORMAT_ARGB8888,
-};
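+/*
+ * Convert a cursor image from the plane's ARGB8888 format to the HW
+ * cursor's ARGB4444 layout by keeping only the upper 4 bits of each
+ * channel. The pixels are written bottom-right aligned into the 64x64
+ * HWC buffer; a checksum and a width/height signature are stored behind
+ * the pixel data at offset AST_HWC_SIZE.
+ */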
+static void ast_update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int height)
+{
+ union {
+ u32 ul;
+ u8 b[4];
+ } srcdata32[2], data32;
+ union {
+ u16 us;
+ u8 b[2];
+ } data16;
+ u32 csum = 0;
+ s32 alpha_dst_delta, last_alpha_dst_delta;
+ u8 __iomem *dstxor;
+ const u8 *srcxor;
+ int i, j;
+ u32 per_pixel_copy, two_pixel_copy;
+
+ alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
+ last_alpha_dst_delta = alpha_dst_delta - (width << 1);
+
+ srcxor = src;
+ dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
+ per_pixel_copy = width & 1;
+ two_pixel_copy = width >> 1;
+
+ for (j = 0; j < height; j++) {
+ for (i = 0; i < two_pixel_copy; i++) {
+ srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+ srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
+ data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+ data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+ data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
+ data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
+
+ writel(data32.ul, dstxor);
+ csum += data32.ul;
+
+ dstxor += 4;
+ srcxor += 8;
+
+ }
-static int
-ast_cursor_plane_helper_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+ for (i = 0; i < per_pixel_copy; i++) {
+ srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
+ data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
+ data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
+ writew(data16.us, dstxor);
+ csum += (u32)data16.us;
+
+ dstxor += 2;
+ srcxor += 4;
+ }
+ dstxor += last_alpha_dst_delta;
+ }
+
+ /* write checksum + signature */
+ dst += AST_HWC_SIZE;
+ writel(csum, dst);
+ writel(width, dst + AST_HWC_SIGNATURE_SizeX);
+ writel(height, dst + AST_HWC_SIGNATURE_SizeY);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
+ writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
+}
+
+static void ast_set_cursor_base(struct ast_private *ast, u64 address)
{
- struct drm_framebuffer *fb = new_state->fb;
- struct drm_crtc *crtc = new_state->crtc;
- struct ast_private *ast;
- int ret;
+ u8 addr0 = (address >> 3) & 0xff;
+ u8 addr1 = (address >> 11) & 0xff;
+ u8 addr2 = (address >> 19) & 0xff;
- if (!crtc || !fb)
- return 0;
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
+}
- ast = to_ast_private(plane->dev);
+static void ast_set_cursor_location(struct ast_private *ast, u16 x, u16 y,
+ u8 x_offset, u8 y_offset)
+{
+ u8 x0 = (x & 0x00ff);
+ u8 x1 = (x & 0x0f00) >> 8;
+ u8 y0 = (y & 0x00ff);
+ u8 y1 = (y & 0x0700) >> 8;
+
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, x0);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, x1);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, y0);
+ ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1);
+}
- ret = ast_cursor_blit(ast, fb);
- if (ret)
- return ret;
+static void ast_set_cursor_enabled(struct ast_private *ast, bool enabled)
+{
+ static const u8 mask = (u8)~(AST_IO_VGACRCB_HWC_16BPP |
+ AST_IO_VGACRCB_HWC_ENABLED);
- return 0;
+ u8 vgacrcb = AST_IO_VGACRCB_HWC_16BPP;
+
+ if (enabled)
+ vgacrcb |= AST_IO_VGACRCB_HWC_ENABLED;
+
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, mask, vgacrcb);
}
+static const uint32_t ast_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct drm_framebuffer *fb = state->fb;
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc_state *crtc_state;
int ret;
- if (!state->crtc)
+ if (!new_plane_state->crtc)
return 0;
- crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
- ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true);
if (ret)
return ret;
- if (!state->visible)
+ if (!new_plane_state->visible)
return 0;
if (fb->width > AST_MAX_HWC_WIDTH || fb->height > AST_MAX_HWC_HEIGHT)
@@ -680,51 +793,192 @@ static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
static void
ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *state = plane->state;
- struct drm_framebuffer *fb = state->fb;
+ struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state);
+ struct drm_framebuffer *fb = new_state->fb;
struct ast_private *ast = to_ast_private(plane->dev);
+ struct dma_buf_map dst_map =
+ ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].map;
+ u64 dst_off =
+ ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].off;
+ struct dma_buf_map src_map = shadow_plane_state->map[0];
unsigned int offset_x, offset_y;
+ u16 x, y;
+ u8 x_offset, y_offset;
+ u8 __iomem *dst;
+ u8 __iomem *sig;
+ const u8 *src;
+
+ src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
+ dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
+ sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
+
+ /*
+ * Do data transfer to HW cursor BO. If a new cursor image was installed,
+ * point the scanout engine to the new buffer's offset (dst_off) and page-flip the HWC buffers.
+ */
+
+ ast_update_cursor_image(dst, src, fb->width, fb->height);
+
+ if (new_state->fb != old_state->fb) {
+ ast_set_cursor_base(ast, dst_off);
+
+ ++ast_cursor_plane->next_hwc_index;
+ ast_cursor_plane->next_hwc_index %= ARRAY_SIZE(ast_cursor_plane->hwc);
+ }
+
+ /*
+ * Update location in HWC signature and registers.
+ */
+
+ writel(new_state->crtc_x, sig + AST_HWC_SIGNATURE_X);
+ writel(new_state->crtc_y, sig + AST_HWC_SIGNATURE_Y);
offset_x = AST_MAX_HWC_WIDTH - fb->width;
- offset_y = AST_MAX_HWC_WIDTH - fb->height;
+ offset_y = AST_MAX_HWC_HEIGHT - fb->height;
- if (state->fb != old_state->fb) {
- /* A new cursor image was installed. */
- ast_cursor_page_flip(ast);
+ if (new_state->crtc_x < 0) {
+ x_offset = (-new_state->crtc_x) + offset_x;
+ x = 0;
+ } else {
+ x_offset = offset_x;
+ x = new_state->crtc_x;
+ }
+ if (new_state->crtc_y < 0) {
+ y_offset = (-new_state->crtc_y) + offset_y;
+ y = 0;
+ } else {
+ y_offset = offset_y;
+ y = new_state->crtc_y;
}
- ast_cursor_show(ast, state->crtc_x, state->crtc_y,
- offset_x, offset_y);
+ ast_set_cursor_location(ast, x, y, x_offset, y_offset);
+
+ /* Dummy write to enable the HWC and make the HW pick up the changes. */
+ ast_set_cursor_enabled(ast, true);
}
static void
ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct ast_private *ast = to_ast_private(plane->dev);
- ast_cursor_hide(ast);
+ ast_set_cursor_enabled(ast, false);
}
static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = {
- .prepare_fb = ast_cursor_plane_helper_prepare_fb,
- .cleanup_fb = NULL, /* not required for cursor plane */
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = ast_cursor_plane_helper_atomic_check,
.atomic_update = ast_cursor_plane_helper_atomic_update,
.atomic_disable = ast_cursor_plane_helper_atomic_disable,
};
+static void ast_cursor_plane_destroy(struct drm_plane *plane)
+{
+ struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane);
+ size_t i;
+ struct drm_gem_vram_object *gbo;
+ struct dma_buf_map map;
+
+ for (i = 0; i < ARRAY_SIZE(ast_cursor_plane->hwc); ++i) {
+ gbo = ast_cursor_plane->hwc[i].gbo;
+ map = ast_cursor_plane->hwc[i].map;
+ drm_gem_vram_vunmap(gbo, &map);
+ drm_gem_vram_unpin(gbo);
+ drm_gem_vram_put(gbo);
+ }
+
+ drm_plane_cleanup(plane);
+}
+
static const struct drm_plane_funcs ast_cursor_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .destroy = ast_cursor_plane_destroy,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
+static int ast_cursor_plane_init(struct ast_private *ast)
+{
+ struct drm_device *dev = &ast->base;
+ struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane;
+ struct drm_plane *cursor_plane = &ast_cursor_plane->base;
+ size_t size, i;
+ struct drm_gem_vram_object *gbo;
+ struct dma_buf_map map;
+ int ret;
+ s64 off;
+
+ /*
+ * Allocate backing storage for cursors. The BOs are permanently
+ * pinned to the top end of the VRAM.
+ */
+
+ size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
+
+ for (i = 0; i < ARRAY_SIZE(ast_cursor_plane->hwc); ++i) {
+ gbo = drm_gem_vram_create(dev, size, 0);
+ if (IS_ERR(gbo)) {
+ ret = PTR_ERR(gbo);
+ goto err_hwc;
+ }
+ ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
+ DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
+ if (ret)
+ goto err_drm_gem_vram_put;
+ ret = drm_gem_vram_vmap(gbo, &map);
+ if (ret)
+ goto err_drm_gem_vram_unpin;
+ off = drm_gem_vram_offset(gbo);
+ if (off < 0) {
+ ret = off;
+ goto err_drm_gem_vram_vunmap;
+ }
+ ast_cursor_plane->hwc[i].gbo = gbo;
+ ast_cursor_plane->hwc[i].map = map;
+ ast_cursor_plane->hwc[i].off = off;
+ }
+
+ /*
+ * Create the cursor plane. The plane's destroy callback will release
+ * the backing storages' BO memory.
+ */
+
+ ret = drm_universal_plane_init(dev, cursor_plane, 0x01,
+ &ast_cursor_plane_funcs,
+ ast_cursor_plane_formats,
+ ARRAY_SIZE(ast_cursor_plane_formats),
+ NULL, DRM_PLANE_TYPE_CURSOR, NULL);
+ if (ret) {
+ drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
+ goto err_hwc;
+ }
+ drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
+
+ return 0;
+
+err_hwc:
+ while (i) {
+ --i;
+ gbo = ast_cursor_plane->hwc[i].gbo;
+ map = ast_cursor_plane->hwc[i].map;
+err_drm_gem_vram_vunmap:
+ drm_gem_vram_vunmap(gbo, &map);
+err_drm_gem_vram_unpin:
+ drm_gem_vram_unpin(gbo);
+err_drm_gem_vram_put:
+ drm_gem_vram_put(gbo);
+ }
+ return ret;
+}
+
/*
* CRTC
*/
@@ -917,7 +1171,7 @@ static int ast_crtc_init(struct drm_device *dev)
int ret;
ret = drm_crtc_init_with_planes(dev, crtc, &ast->primary_plane,
- &ast->cursor_plane, &ast_crtc_funcs,
+ &ast->cursor_plane.base, &ast_crtc_funcs,
NULL);
if (ret)
return ret;
@@ -1109,10 +1363,6 @@ int ast_mode_config_init(struct ast_private *ast)
struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
- ret = ast_cursor_init(ast);
- if (ret)
- return ret;
-
ret = drmm_mode_config_init(dev);
if (ret)
return ret;
@@ -1138,30 +1388,14 @@ int ast_mode_config_init(struct ast_private *ast)
dev->mode_config.helper_private = &ast_mode_config_helper_funcs;
- memset(&ast->primary_plane, 0, sizeof(ast->primary_plane));
- ret = drm_universal_plane_init(dev, &ast->primary_plane, 0x01,
- &ast_primary_plane_funcs,
- ast_primary_plane_formats,
- ARRAY_SIZE(ast_primary_plane_formats),
- NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
- drm_err(dev, "ast: drm_universal_plane_init() failed: %d\n", ret);
+
+ ret = ast_primary_plane_init(ast);
+ if (ret)
return ret;
- }
- drm_plane_helper_add(&ast->primary_plane,
- &ast_primary_plane_helper_funcs);
- ret = drm_universal_plane_init(dev, &ast->cursor_plane, 0x01,
- &ast_cursor_plane_funcs,
- ast_cursor_plane_formats,
- ARRAY_SIZE(ast_cursor_plane_formats),
- NULL, DRM_PLANE_TYPE_CURSOR, NULL);
- if (ret) {
- drm_err(dev, "drm_universal_plane_failed(): %d\n", ret);
+ ret = ast_cursor_plane_init(ast);
+ if (ret)
return ret;
- }
- drm_plane_helper_add(&ast->cursor_plane,
- &ast_cursor_plane_helper_funcs);
ast_crtc_init(dev);
ast_encoder_init(dev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 98fb53b75f77..65af56e47129 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -557,103 +557,10 @@ static irqreturn_t atmel_hlcdc_dc_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-struct atmel_hlcdc_dc_commit {
- struct work_struct work;
- struct drm_device *dev;
- struct drm_atomic_state *state;
-};
-
-static void
-atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit)
-{
- struct drm_device *dev = commit->dev;
- struct atmel_hlcdc_dc *dc = dev->dev_private;
- struct drm_atomic_state *old_state = commit->state;
-
- /* Apply the atomic update. */
- drm_atomic_helper_commit_modeset_disables(dev, old_state);
- drm_atomic_helper_commit_planes(dev, old_state, 0);
- drm_atomic_helper_commit_modeset_enables(dev, old_state);
-
- drm_atomic_helper_wait_for_vblanks(dev, old_state);
-
- drm_atomic_helper_cleanup_planes(dev, old_state);
-
- drm_atomic_state_put(old_state);
-
- /* Complete the commit, wake up any waiter. */
- spin_lock(&dc->commit.wait.lock);
- dc->commit.pending = false;
- wake_up_all_locked(&dc->commit.wait);
- spin_unlock(&dc->commit.wait.lock);
-
- kfree(commit);
-}
-
-static void atmel_hlcdc_dc_atomic_work(struct work_struct *work)
-{
- struct atmel_hlcdc_dc_commit *commit =
- container_of(work, struct atmel_hlcdc_dc_commit, work);
-
- atmel_hlcdc_dc_atomic_complete(commit);
-}
-
-static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool async)
-{
- struct atmel_hlcdc_dc *dc = dev->dev_private;
- struct atmel_hlcdc_dc_commit *commit;
- int ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
- return ret;
-
- /* Allocate the commit object. */
- commit = kzalloc(sizeof(*commit), GFP_KERNEL);
- if (!commit) {
- ret = -ENOMEM;
- goto error;
- }
-
- INIT_WORK(&commit->work, atmel_hlcdc_dc_atomic_work);
- commit->dev = dev;
- commit->state = state;
-
- spin_lock(&dc->commit.wait.lock);
- ret = wait_event_interruptible_locked(dc->commit.wait,
- !dc->commit.pending);
- if (ret == 0)
- dc->commit.pending = true;
- spin_unlock(&dc->commit.wait.lock);
-
- if (ret)
- goto err_free;
-
- /* We have our own synchronization through the commit lock. */
- BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
-
- /* Swap state succeeded, this is the point of no return. */
- drm_atomic_state_get(state);
- if (async)
- queue_work(dc->wq, &commit->work);
- else
- atmel_hlcdc_dc_atomic_complete(commit);
-
- return 0;
-
-err_free:
- kfree(commit);
-error:
- drm_atomic_helper_cleanup_planes(dev, state);
- return ret;
-}
-
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
- .atomic_commit = atmel_hlcdc_dc_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
@@ -712,11 +619,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
if (!dc)
return -ENOMEM;
- dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0);
- if (!dc->wq)
- return -ENOMEM;
-
- init_waitqueue_head(&dc->commit.wait);
dc->desc = match->data;
dc->hlcdc = dev_get_drvdata(dev->dev->parent);
dev->dev_private = dc;
@@ -724,7 +626,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
ret = clk_prepare_enable(dc->hlcdc->periph_clk);
if (ret) {
dev_err(dev->dev, "failed to enable periph_clk\n");
- goto err_destroy_wq;
+ return ret;
}
pm_runtime_enable(dev->dev);
@@ -761,9 +663,6 @@ err_periph_clk_disable:
pm_runtime_disable(dev->dev);
clk_disable_unprepare(dc->hlcdc->periph_clk);
-err_destroy_wq:
- destroy_workqueue(dc->wq);
-
return ret;
}
@@ -771,7 +670,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
- flush_workqueue(dc->wq);
drm_kms_helper_poll_fini(dev);
drm_atomic_helper_shutdown(dev);
drm_mode_config_cleanup(dev);
@@ -784,7 +682,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
pm_runtime_disable(dev->dev);
clk_disable_unprepare(dc->hlcdc->periph_clk);
- destroy_workqueue(dc->wq);
}
static int atmel_hlcdc_dc_irq_postinstall(struct drm_device *dev)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index 469d4507e576..5b5c774e0edf 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -331,9 +331,7 @@ struct atmel_hlcdc_dc_desc {
* @crtc: CRTC provided by the display controller
* @planes: instantiated planes
* @layers: active HLCDC layers
- * @wq: display controller workqueue
* @suspend: used to store the HLCDC state when entering suspend
- * @commit: used for async commit handling
*/
struct atmel_hlcdc_dc {
const struct atmel_hlcdc_dc_desc *desc;
@@ -341,15 +339,10 @@ struct atmel_hlcdc_dc {
struct atmel_hlcdc *hlcdc;
struct drm_crtc *crtc;
struct atmel_hlcdc_layer *layers[ATMEL_HLCDC_MAX_LAYERS];
- struct workqueue_struct *wq;
struct {
u32 imr;
struct drm_atomic_state *state;
} suspend;
- struct {
- wait_queue_head_t wait;
- bool pending;
- } commit;
};
extern struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_formats;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 15bc93163833..a077d93c78d7 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -593,22 +593,23 @@ atmel_hlcdc_plane_update_disc_area(struct atmel_hlcdc_plane *plane,
}
static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
- struct drm_plane_state *s)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *s = drm_atomic_get_new_plane_state(state, p);
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
- struct atmel_hlcdc_plane_state *state =
+ struct atmel_hlcdc_plane_state *hstate =
drm_plane_state_to_atmel_hlcdc_plane_state(s);
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
- struct drm_framebuffer *fb = state->base.fb;
+ struct drm_framebuffer *fb = hstate->base.fb;
const struct drm_display_mode *mode;
struct drm_crtc_state *crtc_state;
int ret;
int i;
- if (!state->base.crtc || WARN_ON(!fb))
+ if (!hstate->base.crtc || WARN_ON(!fb))
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(s->state, s->crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state, s->crtc);
mode = &crtc_state->adjusted_mode;
ret = drm_atomic_helper_check_plane_state(s, crtc_state,
@@ -617,101 +618,101 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
if (ret || !s->visible)
return ret;
- state->src_x = s->src.x1;
- state->src_y = s->src.y1;
- state->src_w = drm_rect_width(&s->src);
- state->src_h = drm_rect_height(&s->src);
- state->crtc_x = s->dst.x1;
- state->crtc_y = s->dst.y1;
- state->crtc_w = drm_rect_width(&s->dst);
- state->crtc_h = drm_rect_height(&s->dst);
+ hstate->src_x = s->src.x1;
+ hstate->src_y = s->src.y1;
+ hstate->src_w = drm_rect_width(&s->src);
+ hstate->src_h = drm_rect_height(&s->src);
+ hstate->crtc_x = s->dst.x1;
+ hstate->crtc_y = s->dst.y1;
+ hstate->crtc_w = drm_rect_width(&s->dst);
+ hstate->crtc_h = drm_rect_height(&s->dst);
- if ((state->src_x | state->src_y | state->src_w | state->src_h) &
+ if ((hstate->src_x | hstate->src_y | hstate->src_w | hstate->src_h) &
SUBPIXEL_MASK)
return -EINVAL;
- state->src_x >>= 16;
- state->src_y >>= 16;
- state->src_w >>= 16;
- state->src_h >>= 16;
+ hstate->src_x >>= 16;
+ hstate->src_y >>= 16;
+ hstate->src_w >>= 16;
+ hstate->src_h >>= 16;
- state->nplanes = fb->format->num_planes;
- if (state->nplanes > ATMEL_HLCDC_LAYER_MAX_PLANES)
+ hstate->nplanes = fb->format->num_planes;
+ if (hstate->nplanes > ATMEL_HLCDC_LAYER_MAX_PLANES)
return -EINVAL;
- for (i = 0; i < state->nplanes; i++) {
+ for (i = 0; i < hstate->nplanes; i++) {
unsigned int offset = 0;
int xdiv = i ? fb->format->hsub : 1;
int ydiv = i ? fb->format->vsub : 1;
- state->bpp[i] = fb->format->cpp[i];
- if (!state->bpp[i])
+ hstate->bpp[i] = fb->format->cpp[i];
+ if (!hstate->bpp[i])
return -EINVAL;
- switch (state->base.rotation & DRM_MODE_ROTATE_MASK) {
+ switch (hstate->base.rotation & DRM_MODE_ROTATE_MASK) {
case DRM_MODE_ROTATE_90:
- offset = (state->src_y / ydiv) *
+ offset = (hstate->src_y / ydiv) *
fb->pitches[i];
- offset += ((state->src_x + state->src_w - 1) /
- xdiv) * state->bpp[i];
- state->xstride[i] = -(((state->src_h - 1) / ydiv) *
+ offset += ((hstate->src_x + hstate->src_w - 1) /
+ xdiv) * hstate->bpp[i];
+ hstate->xstride[i] = -(((hstate->src_h - 1) / ydiv) *
fb->pitches[i]) -
- (2 * state->bpp[i]);
- state->pstride[i] = fb->pitches[i] - state->bpp[i];
+ (2 * hstate->bpp[i]);
+ hstate->pstride[i] = fb->pitches[i] - hstate->bpp[i];
break;
case DRM_MODE_ROTATE_180:
- offset = ((state->src_y + state->src_h - 1) /
+ offset = ((hstate->src_y + hstate->src_h - 1) /
ydiv) * fb->pitches[i];
- offset += ((state->src_x + state->src_w - 1) /
- xdiv) * state->bpp[i];
- state->xstride[i] = ((((state->src_w - 1) / xdiv) - 1) *
- state->bpp[i]) - fb->pitches[i];
- state->pstride[i] = -2 * state->bpp[i];
+ offset += ((hstate->src_x + hstate->src_w - 1) /
+ xdiv) * hstate->bpp[i];
+ hstate->xstride[i] = ((((hstate->src_w - 1) / xdiv) - 1) *
+ hstate->bpp[i]) - fb->pitches[i];
+ hstate->pstride[i] = -2 * hstate->bpp[i];
break;
case DRM_MODE_ROTATE_270:
- offset = ((state->src_y + state->src_h - 1) /
+ offset = ((hstate->src_y + hstate->src_h - 1) /
ydiv) * fb->pitches[i];
- offset += (state->src_x / xdiv) * state->bpp[i];
- state->xstride[i] = ((state->src_h - 1) / ydiv) *
+ offset += (hstate->src_x / xdiv) * hstate->bpp[i];
+ hstate->xstride[i] = ((hstate->src_h - 1) / ydiv) *
fb->pitches[i];
- state->pstride[i] = -fb->pitches[i] - state->bpp[i];
+ hstate->pstride[i] = -fb->pitches[i] - hstate->bpp[i];
break;
case DRM_MODE_ROTATE_0:
default:
- offset = (state->src_y / ydiv) * fb->pitches[i];
- offset += (state->src_x / xdiv) * state->bpp[i];
- state->xstride[i] = fb->pitches[i] -
- ((state->src_w / xdiv) *
- state->bpp[i]);
- state->pstride[i] = 0;
+ offset = (hstate->src_y / ydiv) * fb->pitches[i];
+ offset += (hstate->src_x / xdiv) * hstate->bpp[i];
+ hstate->xstride[i] = fb->pitches[i] -
+ ((hstate->src_w / xdiv) *
+ hstate->bpp[i]);
+ hstate->pstride[i] = 0;
break;
}
- state->offsets[i] = offset + fb->offsets[i];
+ hstate->offsets[i] = offset + fb->offsets[i];
}
/*
* Swap width and size in case of 90 or 270 degrees rotation
*/
- if (drm_rotation_90_or_270(state->base.rotation)) {
- swap(state->src_w, state->src_h);
+ if (drm_rotation_90_or_270(hstate->base.rotation)) {
+ swap(hstate->src_w, hstate->src_h);
}
if (!desc->layout.size &&
- (mode->hdisplay != state->crtc_w ||
- mode->vdisplay != state->crtc_h))
+ (mode->hdisplay != hstate->crtc_w ||
+ mode->vdisplay != hstate->crtc_h))
return -EINVAL;
- if ((state->crtc_h != state->src_h || state->crtc_w != state->src_w) &&
+ if ((hstate->crtc_h != hstate->src_h || hstate->crtc_w != hstate->src_w) &&
(!desc->layout.memsize ||
- state->base.fb->format->has_alpha))
+ hstate->base.fb->format->has_alpha))
return -EINVAL;
return 0;
}
static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
@@ -730,27 +731,29 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
}
static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
- struct drm_plane_state *old_s)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_s = drm_atomic_get_new_plane_state(state,
+ p);
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
- struct atmel_hlcdc_plane_state *state =
- drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
+ struct atmel_hlcdc_plane_state *hstate =
+ drm_plane_state_to_atmel_hlcdc_plane_state(new_s);
u32 sr;
- if (!p->state->crtc || !p->state->fb)
+ if (!new_s->crtc || !new_s->fb)
return;
- if (!state->base.visible) {
- atmel_hlcdc_plane_atomic_disable(p, old_s);
+ if (!hstate->base.visible) {
+ atmel_hlcdc_plane_atomic_disable(p, state);
return;
}
- atmel_hlcdc_plane_update_pos_and_size(plane, state);
- atmel_hlcdc_plane_update_general_settings(plane, state);
- atmel_hlcdc_plane_update_format(plane, state);
- atmel_hlcdc_plane_update_clut(plane, state);
- atmel_hlcdc_plane_update_buffers(plane, state);
- atmel_hlcdc_plane_update_disc_area(plane, state);
+ atmel_hlcdc_plane_update_pos_and_size(plane, hstate);
+ atmel_hlcdc_plane_update_general_settings(plane, hstate);
+ atmel_hlcdc_plane_update_format(plane, hstate);
+ atmel_hlcdc_plane_update_clut(plane, hstate);
+ atmel_hlcdc_plane_update_buffers(plane, hstate);
+ atmel_hlcdc_plane_update_disc_area(plane, hstate);
/* Enable the overrun interrupts. */
atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IER,
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index d0c65610ebb5..989a05bc8197 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2457,7 +2457,7 @@ clk_disable:
static int cdns_mhdp_remove(struct platform_device *pdev)
{
- struct cdns_mhdp_device *mhdp = dev_get_drvdata(&pdev->dev);
+ struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
unsigned long timeout = msecs_to_jiffies(100);
bool stop_fw = false;
int ret;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index dda60051854b..5b4547e0f775 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -53,6 +53,45 @@ void __drm_crtc_commit_free(struct kref *kref)
EXPORT_SYMBOL(__drm_crtc_commit_free);
/**
+ * drm_crtc_commit_wait - Waits for a commit to complete
+ * @commit: &drm_crtc_commit to wait for
+ *
+ * Waits for a given &drm_crtc_commit to be programmed into the
+ * hardware and flipped to.
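+ *
+ * A minimal usage sketch (driver_wait_for_commits() is a hypothetical
+ * driver function) that stalls for the previous commit on each CRTC:
+ *
+ * .. code-block:: c
+ *
+ *	void driver_wait_for_commits(struct drm_atomic_state *old_state)
+ *	{
+ *		struct drm_crtc *crtc;
+ *		struct drm_crtc_state *old_crtc_state;
+ *		int i;
+ *
+ *		for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
+ *			if (drm_crtc_commit_wait(old_crtc_state->commit))
+ *				drm_err(crtc->dev, "commit wait timed out\n");
+ *	}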
+ *
+ * Returns:
+ *
+ * 0 on success, a negative error code otherwise.
+ */
+int drm_crtc_commit_wait(struct drm_crtc_commit *commit)
+{
+ unsigned long timeout = 10 * HZ;
+ int ret;
+
+ if (!commit)
+ return 0;
+
+ ret = wait_for_completion_timeout(&commit->hw_done, timeout);
+ if (!ret) {
+ DRM_ERROR("hw_done timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Currently no support for overwriting flips, hence
+ * stall for previous one to execute completely.
+ */
+ ret = wait_for_completion_timeout(&commit->flip_done, timeout);
+ if (!ret) {
+ DRM_ERROR("flip_done timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_crtc_commit_wait);
+
+/**
* drm_atomic_state_default_release -
* release memory initialized by drm_atomic_state_init
* @state: atomic state
@@ -578,13 +617,9 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
ret = drm_plane_check_pixel_format(plane, fb->format->format,
fb->modifier);
if (ret) {
- struct drm_format_name_buf format_name;
-
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n",
plane->base.id, plane->name,
- drm_get_format_name(fb->format->format,
- &format_name),
- fb->modifier);
+ &fb->format->format, fb->modifier);
return ret;
}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 560aaecba31b..47ced8bf7e36 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -902,7 +902,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
if (!funcs || !funcs->atomic_check)
continue;
- ret = funcs->atomic_check(plane, new_plane_state);
+ ret = funcs->atomic_check(plane, state);
if (ret) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
plane->base.id, plane->name);
@@ -1742,7 +1742,7 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
return -EBUSY;
}
- return funcs->atomic_async_check(plane, new_plane_state);
+ return funcs->atomic_async_check(plane, state);
}
EXPORT_SYMBOL(drm_atomic_helper_async_check);
@@ -1772,7 +1772,7 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
struct drm_framebuffer *old_fb = plane->state->fb;
funcs = plane->helper_private;
- funcs->atomic_async_update(plane, plane_state);
+ funcs->atomic_async_update(plane, state);
/*
* ->atomic_async_update() is supposed to update the
@@ -2202,70 +2202,27 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
struct drm_plane_state *old_plane_state;
struct drm_connector *conn;
struct drm_connector_state *old_conn_state;
- struct drm_crtc_commit *commit;
int i;
long ret;
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
- commit = old_crtc_state->commit;
-
- if (!commit)
- continue;
-
- ret = wait_for_completion_timeout(&commit->hw_done,
- 10*HZ);
- if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
- crtc->base.id, crtc->name);
-
- /* Currently no support for overwriting flips, hence
- * stall for previous one to execute completely. */
- ret = wait_for_completion_timeout(&commit->flip_done,
- 10*HZ);
- if (ret == 0)
- DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
+ ret = drm_crtc_commit_wait(old_crtc_state->commit);
+ if (ret)
+ DRM_ERROR("[CRTC:%d:%s] commit wait timed out\n",
crtc->base.id, crtc->name);
}
for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
- commit = old_conn_state->commit;
-
- if (!commit)
- continue;
-
- ret = wait_for_completion_timeout(&commit->hw_done,
- 10*HZ);
- if (ret == 0)
- DRM_ERROR("[CONNECTOR:%d:%s] hw_done timed out\n",
- conn->base.id, conn->name);
-
- /* Currently no support for overwriting flips, hence
- * stall for previous one to execute completely. */
- ret = wait_for_completion_timeout(&commit->flip_done,
- 10*HZ);
- if (ret == 0)
- DRM_ERROR("[CONNECTOR:%d:%s] flip_done timed out\n",
+ ret = drm_crtc_commit_wait(old_conn_state->commit);
+ if (ret)
+ DRM_ERROR("[CONNECTOR:%d:%s] commit wait timed out\n",
conn->base.id, conn->name);
}
for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
- commit = old_plane_state->commit;
-
- if (!commit)
- continue;
-
- ret = wait_for_completion_timeout(&commit->hw_done,
- 10*HZ);
- if (ret == 0)
- DRM_ERROR("[PLANE:%d:%s] hw_done timed out\n",
- plane->base.id, plane->name);
-
- /* Currently no support for overwriting flips, hence
- * stall for previous one to execute completely. */
- ret = wait_for_completion_timeout(&commit->flip_done,
- 10*HZ);
- if (ret == 0)
- DRM_ERROR("[PLANE:%d:%s] flip_done timed out\n",
+ ret = drm_crtc_commit_wait(old_plane_state->commit);
+ if (ret)
+ DRM_ERROR("[PLANE:%d:%s] commit wait timed out\n",
plane->base.id, plane->name);
}
}
@@ -2571,9 +2528,9 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
no_disable)
continue;
- funcs->atomic_disable(plane, old_plane_state);
+ funcs->atomic_disable(plane, old_state);
} else if (new_plane_state->crtc || disabling) {
- funcs->atomic_update(plane, old_plane_state);
+ funcs->atomic_update(plane, old_state);
}
}
@@ -2645,10 +2602,10 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
plane_funcs->atomic_disable)
- plane_funcs->atomic_disable(plane, old_plane_state);
+ plane_funcs->atomic_disable(plane, old_state);
else if (new_plane_state->crtc ||
drm_atomic_plane_disabling(old_plane_state, new_plane_state))
- plane_funcs->atomic_update(plane, old_plane_state);
+ plane_funcs->atomic_update(plane, old_state);
}
if (crtc_funcs && crtc_funcs->atomic_flush)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 9c4f9947b194..26a77a735905 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -735,11 +735,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
fb->format->format,
fb->modifier);
if (ret) {
- struct drm_format_name_buf format_name;
-
- DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n",
- drm_get_format_name(fb->format->format,
- &format_name),
+ DRM_DEBUG_KMS("Invalid pixel format %p4cc, modifier 0x%llx\n",
+ &fb->format->format,
fb->modifier);
goto out;
}
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 309afe61afdd..42a0c6888c33 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1154,6 +1154,7 @@ static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
drm_dp_encode_sideband_req(&req, msg);
+ msg->path_msg = true;
}
static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
@@ -2303,11 +2304,9 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
if (port->pdt != DP_PEER_DEVICE_NONE &&
drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
- port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ port->port_num >= DP_MST_LOGICAL_PORT_0)
port->cached_edid = drm_get_edid(port->connector,
&port->aux.ddc);
- drm_connector_set_tile_property(port->connector);
- }
drm_connector_register(port->connector);
return;
@@ -2824,15 +2823,21 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
req_type = txmsg->msg[0] & 0x7f;
if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
- req_type == DP_RESOURCE_STATUS_NOTIFY)
+ req_type == DP_RESOURCE_STATUS_NOTIFY ||
+ req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
hdr->broadcast = 1;
else
hdr->broadcast = 0;
hdr->path_msg = txmsg->path_msg;
- hdr->lct = mstb->lct;
- hdr->lcr = mstb->lct - 1;
- if (mstb->lct > 1)
- memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
+ if (hdr->broadcast) {
+ hdr->lct = 1;
+ hdr->lcr = 6;
+ } else {
+ hdr->lct = mstb->lct;
+ hdr->lcr = mstb->lct - 1;
+ }
+
+ memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
return 0;
}
@@ -4234,9 +4239,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
case DP_PEER_DEVICE_SST_SINK:
ret = connector_status_connected;
/* for logical ports - cache the EDID */
- if (port->port_num >= 8 && !port->cached_edid) {
+ if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
- }
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps)
@@ -5121,11 +5125,16 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
if (!found)
return 0;
- /* This should never happen, as it means we tried to
- * set a mode before querying the full_pbn
+ /*
+ * This could happen if the sink deasserted its HPD line, but
+ * the branch device still reports it as attached (PDT != NONE).
*/
- if (WARN_ON(!port->full_pbn))
+ if (!port->full_pbn) {
+ drm_dbg_atomic(port->mgr->dev,
+ "[MSTB:%p] [MST PORT:%p] no BW available for the port\n",
+ port->parent, port);
return -EINVAL;
+ }
pbn_used = vcpi->pbn;
} else {
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 20d22e41d7ce..c2f78dee9f2d 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -61,7 +61,7 @@ static struct idr drm_minors_idr;
* prefer to embed struct drm_device into their own device
* structure and call drm_dev_init() themselves.
*/
-static bool drm_core_init_complete = false;
+static bool drm_core_init_complete;
static struct dentry *drm_debugfs_root;
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index aca62ed51e82..4d01464b6f95 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -177,11 +177,8 @@ static int framebuffer_check(struct drm_device *dev,
/* check if the format is supported at all */
if (!__drm_format_info(r->pixel_format)) {
- struct drm_format_name_buf format_name;
-
- DRM_DEBUG_KMS("bad framebuffer format %s\n",
- drm_get_format_name(r->pixel_format,
- &format_name));
+ DRM_DEBUG_KMS("bad framebuffer format %p4cc\n",
+ &r->pixel_format);
return -EINVAL;
}
@@ -1160,14 +1157,12 @@ EXPORT_SYMBOL(drm_framebuffer_plane_height);
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb)
{
- struct drm_format_name_buf format_name;
unsigned int i;
drm_printf_indent(p, indent, "allocated by = %s\n", fb->comm);
drm_printf_indent(p, indent, "refcount=%u\n",
drm_framebuffer_read_refcount(fb));
- drm_printf_indent(p, indent, "format=%s\n",
- drm_get_format_name(fb->format->format, &format_name));
+ drm_printf_indent(p, indent, "format=%p4cc\n", &fb->format->format);
drm_printf_indent(p, indent, "modifier=0x%llx\n", fb->modifier);
drm_printf_indent(p, indent, "size=%ux%u\n", fb->width, fb->height);
drm_printf_indent(p, indent, "layers:\n");
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index c2ce78c4edc3..9989425e9875 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -1212,6 +1212,7 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
return 0;
}
+EXPORT_SYMBOL(drm_gem_vmap);
void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
@@ -1224,6 +1225,7 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
/* Always set the mapping to NULL. Callers may rely on this. */
dma_buf_map_clear(map);
}
+EXPORT_SYMBOL(drm_gem_vunmap);
/**
* drm_gem_lock_reservations - Sets up the ww context and acquires
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
new file mode 100644
index 000000000000..a005c5a0ba46
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/dma-resv.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "drm_internal.h"
+
+/**
+ * DOC: overview
+ *
+ * The GEM atomic helpers library implements generic atomic-commit
+ * functions for drivers that use GEM objects. Currently, it provides
+ * synchronization helpers, and plane state and framebuffer BO mappings
+ * for planes with shadow buffers.
+ *
+ * Before scanout, a plane's framebuffer needs to be synchronized with
+ * possible writers that draw into the framebuffer. All drivers should
+ * call drm_gem_plane_helper_prepare_fb() from their implementation of
+ * struct &drm_plane_helper.prepare_fb . It sets the plane's fence from
+ * the framebuffer so that the DRM core can synchronize access automatically.
+ *
+ * drm_gem_plane_helper_prepare_fb() can also be used directly as
+ * implementation of prepare_fb. For drivers based on
+ * struct drm_simple_display_pipe, drm_gem_simple_display_pipe_prepare_fb()
+ * provides equivalent functionality.
+ *
+ * .. code-block:: c
+ *
+ * #include <drm/drm_gem_atomic_helper.h>
+ *
+ * struct drm_plane_helper_funcs driver_plane_helper_funcs = {
+ * ...,
+ * .prepare_fb = drm_gem_plane_helper_prepare_fb,
+ * };
+ *
+ * struct drm_simple_display_pipe_funcs driver_pipe_funcs = {
+ * ...,
+ * .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
+ * };
+ *
+ * A driver using a shadow buffer copies the content of the shadow buffers
+ * into the HW's framebuffer memory during an atomic update. This requires
+ * a mapping of the shadow buffer into kernel address space. The mappings
+ * cannot be established by commit-tail functions, such as atomic_update,
+ * as this would violate locking rules around dma_buf_vmap().
+ *
+ * The helpers for shadow-buffered planes establish and release mappings,
+ * and provide struct drm_shadow_plane_state, which stores the plane's mappings
+ * for commit-tail functions.
+ *
+ * Shadow-buffered planes can easily be enabled by using the provided macros
+ * %DRM_GEM_SHADOW_PLANE_FUNCS and %DRM_GEM_SHADOW_PLANE_HELPER_FUNCS.
+ * These macros set up the plane and plane-helper callbacks to point to the
+ * shadow-buffer helpers.
+ *
+ * .. code-block:: c
+ *
+ * #include <drm/drm_gem_atomic_helper.h>
+ *
+ * struct drm_plane_funcs driver_plane_funcs = {
+ * ...,
+ * DRM_GEM_SHADOW_PLANE_FUNCS,
+ * };
+ *
+ * struct drm_plane_helper_funcs driver_plane_helper_funcs = {
+ * ...,
+ * DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ * };
+ *
+ * In the driver's atomic-update function, shadow-buffer mappings are available
+ * from the plane state. Use to_drm_shadow_plane_state() to upcast from
+ * struct drm_plane_state.
+ *
+ * .. code-block:: c
+ *
+ * void driver_plane_atomic_update(struct drm_plane *plane,
+ * struct drm_plane_state *old_plane_state)
+ * {
+ * struct drm_plane_state *plane_state = plane->state;
+ * struct drm_shadow_plane_state *shadow_plane_state =
+ * to_drm_shadow_plane_state(plane_state);
+ *
+ * // access shadow buffer via shadow_plane_state->map
+ * }
+ *
+ * A mapping address for each of the framebuffer's buffer objects is stored in
+ * struct &drm_shadow_plane_state.map. The mappings are valid while the state
+ * is being used.
+ *
+ * Drivers that use struct drm_simple_display_pipe can use
+ * %DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS to initialize the respective
+ * callbacks. Access to shadow-buffer mappings is similar to regular
+ * atomic_update.
+ *
+ * .. code-block:: c
+ *
+ * struct drm_simple_display_pipe_funcs driver_pipe_funcs = {
+ * ...,
+ * DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+ * };
+ *
+ * void driver_pipe_enable(struct drm_simple_display_pipe *pipe,
+ * struct drm_crtc_state *crtc_state,
+ * struct drm_plane_state *plane_state)
+ * {
+ * struct drm_shadow_plane_state *shadow_plane_state =
+ * to_drm_shadow_plane_state(plane_state);
+ *
+ * // access shadow buffer via shadow_plane_state->map
+ * }
+ */
+
+/*
+ * Plane Helpers
+ */
+
+/**
+ * drm_gem_plane_helper_prepare_fb() - Prepare a GEM backed framebuffer
+ * @plane: Plane
+ * @state: Plane state the fence will be attached to
+ *
+ * This function extracts the exclusive fence from &drm_gem_object.resv and
+ * attaches it to plane state for the atomic helper to wait on. This is
+ * necessary to correctly implement implicit synchronization for any buffers
+ * shared as a struct &dma_buf. This function can be used as the
+ * &drm_plane_helper_funcs.prepare_fb callback.
+ *
+ * There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple
+ * GEM based framebuffer drivers which have their buffers always pinned in
+ * memory.
+ *
+ * See drm_atomic_set_fence_for_plane() for a discussion of implicit and
+ * explicit fencing in atomic modeset updates.
+ */
+int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
+{
+ struct drm_gem_object *obj;
+ struct dma_fence *fence;
+
+ if (!state->fb)
+ return 0;
+
+ obj = drm_gem_fb_get_obj(state->fb, 0);
+ fence = dma_resv_get_excl_rcu(obj->resv);
+ drm_atomic_set_fence_for_plane(state, fence);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);
+
+/**
+ * drm_gem_simple_display_pipe_prepare_fb - prepare_fb helper for &drm_simple_display_pipe
+ * @pipe: Simple display pipe
+ * @plane_state: Plane state
+ *
+ * This function uses drm_gem_plane_helper_prepare_fb() to extract the exclusive fence
+ * from &drm_gem_object.resv and attaches it to plane state for the atomic
+ * helper to wait on. This is necessary to correctly implement implicit
+ * synchronization for any buffers shared as a struct &dma_buf. Drivers can use
+ * this as their &drm_simple_display_pipe_funcs.prepare_fb callback.
+ *
+ * See drm_atomic_set_fence_for_plane() for a discussion of implicit and
+ * explicit fencing in atomic modeset updates.
+ */
+int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ return drm_gem_plane_helper_prepare_fb(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(drm_gem_simple_display_pipe_prepare_fb);
+
+/*
+ * Shadow-buffered Planes
+ */
+
+/**
+ * drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
+ * @plane: the plane
+ *
+ * This function implements struct &drm_plane_funcs.atomic_duplicate_state for
+ * shadow-buffered planes. It assumes the existing state to be of type
+ * struct drm_shadow_plane_state and it allocates the new state to be of this
+ * type.
+ *
+ * The function does not duplicate existing mappings of the shadow buffers.
+ * Mappings are maintained during the atomic commit by the plane's prepare_fb
+ * and cleanup_fb helpers. See drm_gem_prepare_shadow_fb() and drm_gem_cleanup_shadow_fb()
+ * for corresponding helpers.
+ *
+ * Returns:
+ * A pointer to a new plane state on success, or NULL otherwise.
+ */
+struct drm_plane_state *
+drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane)
+{
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_shadow_plane_state *new_shadow_plane_state;
+
+ if (!plane_state)
+ return NULL;
+
+ new_shadow_plane_state = kzalloc(sizeof(*new_shadow_plane_state), GFP_KERNEL);
+ if (!new_shadow_plane_state)
+ return NULL;
+ __drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
+
+ return &new_shadow_plane_state->base;
+}
+EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state);
+
+/**
+ * drm_gem_destroy_shadow_plane_state - deletes shadow-buffered plane state
+ * @plane: the plane
+ * @plane_state: the plane state of type struct drm_shadow_plane_state
+ *
+ * This function implements struct &drm_plane_funcs.atomic_destroy_state
+ * for shadow-buffered planes. It expects that mappings of shadow buffers
+ * have been released already.
+ */
+void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_shadow_plane_state *shadow_plane_state =
+ to_drm_shadow_plane_state(plane_state);
+
+ __drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base);
+ kfree(shadow_plane_state);
+}
+EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);
+
+/**
+ * drm_gem_reset_shadow_plane - resets a shadow-buffered plane
+ * @plane: the plane
+ *
+ * This function implements struct &drm_plane_funcs.reset for
+ * shadow-buffered planes. It assumes the current plane state to be
+ * of type struct drm_shadow_plane_state and it allocates the new state of
+ * this type.
+ */
+void drm_gem_reset_shadow_plane(struct drm_plane *plane)
+{
+ struct drm_shadow_plane_state *shadow_plane_state;
+
+ if (plane->state) {
+ drm_gem_destroy_shadow_plane_state(plane, plane->state);
+ plane->state = NULL; /* must be set to NULL here */
+ }
+
+ shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL);
+ if (!shadow_plane_state)
+ return;
+ __drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
+}
+EXPORT_SYMBOL(drm_gem_reset_shadow_plane);
+
+/**
+ * drm_gem_prepare_shadow_fb - prepares shadow framebuffers
+ * @plane: the plane
+ * @plane_state: the plane state of type struct drm_shadow_plane_state
+ *
+ * This function implements struct &drm_plane_helper_funcs.prepare_fb. It
+ * maps all buffer objects of the plane's framebuffer into kernel address
+ * space and stores them in &struct drm_shadow_plane_state.map. The
+ * framebuffer will be synchronized as part of the atomic commit.
+ *
+ * See drm_gem_cleanup_shadow_fb() for cleanup.
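+ *
+ * As an illustrative sketch, a driver that cannot use
+ * %DRM_GEM_SHADOW_PLANE_HELPER_FUNCS can hook the helpers up manually
+ * (driver_plane_atomic_check() and driver_plane_atomic_update() are
+ * hypothetical driver functions):
+ *
+ * .. code-block:: c
+ *
+ *	static const struct drm_plane_helper_funcs driver_plane_helper_funcs = {
+ *		.prepare_fb = drm_gem_prepare_shadow_fb,
+ *		.cleanup_fb = drm_gem_cleanup_shadow_fb,
+ *		.atomic_check = driver_plane_atomic_check,
+ *		.atomic_update = driver_plane_atomic_update,
+ *	};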
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state)
+{
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_gem_object *obj;
+ struct dma_buf_map map;
+ int ret;
+ size_t i;
+
+ if (!fb)
+ return 0;
+
+ ret = drm_gem_plane_helper_prepare_fb(plane, plane_state);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(shadow_plane_state->map); ++i) {
+ obj = drm_gem_fb_get_obj(fb, i);
+ if (!obj)
+ continue;
+ ret = drm_gem_vmap(obj, &map);
+ if (ret)
+ goto err_drm_gem_vunmap;
+ shadow_plane_state->map[i] = map;
+ }
+
+ return 0;
+
+err_drm_gem_vunmap:
+ while (i) {
+ --i;
+ obj = drm_gem_fb_get_obj(fb, i);
+ if (!obj)
+ continue;
+ drm_gem_vunmap(obj, &shadow_plane_state->map[i]);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_prepare_shadow_fb);
+
+/**
+ * drm_gem_cleanup_shadow_fb - releases shadow framebuffers
+ * @plane: the plane
+ * @plane_state: the plane state of type struct drm_shadow_plane_state
+ *
+ * This function implements struct &drm_plane_helper_funcs.cleanup_fb.
+ * It unmaps all buffer objects of the plane's framebuffer.
+ *
+ * See drm_gem_prepare_shadow_fb() for more information.
+ */
+void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state)
+{
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ size_t i = ARRAY_SIZE(shadow_plane_state->map);
+ struct drm_gem_object *obj;
+
+ if (!fb)
+ return;
+
+ while (i) {
+ --i;
+ obj = drm_gem_fb_get_obj(fb, i);
+ if (!obj)
+ continue;
+ drm_gem_vunmap(obj, &shadow_plane_state->map[i]);
+ }
+}
+EXPORT_SYMBOL(drm_gem_cleanup_shadow_fb);
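The prepare_fb/cleanup_fb pair above keeps the vmap'ed shadow buffers in &struct drm_shadow_plane_state.map for the duration of the commit. Below is a minimal sketch of how a driver's atomic_update could consume that mapping; the foo_* names are hypothetical and the pre-conversion (plane, old_state) callback signature is used for brevity, so this is illustration rather than part of the patch.

#include <linux/dma-buf-map.h>

#include <drm/drm_gem_atomic_helper.h>

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_plane_state *old_state)
{
	struct drm_shadow_plane_state *shadow_plane_state =
		to_drm_shadow_plane_state(plane->state);
	struct drm_framebuffer *fb = plane->state->fb;

	if (!fb || dma_buf_map_is_null(&shadow_plane_state->map[0]))
		return;

	/*
	 * vaddr is valid for system-memory buffers vmap'ed by
	 * drm_gem_prepare_shadow_fb(); foo_hw_blit() stands in for a
	 * hypothetical hardware upload.
	 */
	foo_hw_blit(plane, fb, shadow_plane_state->map[0].vaddr);
}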
+
+/**
+ * drm_gem_simple_kms_prepare_shadow_fb - prepares shadow framebuffers
+ * @pipe: the simple display pipe
+ * @plane_state: the plane state of type struct drm_shadow_plane_state
+ *
+ * This function implements struct drm_simple_display_pipe_funcs.prepare_fb. It
+ * maps all buffer objects of the plane's framebuffer into kernel address
+ * space and stores them in struct drm_shadow_plane_state.map. The
+ * framebuffer will be synchronized as part of the atomic commit.
+ *
+ * See drm_gem_simple_kms_cleanup_shadow_fb() for cleanup.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ return drm_gem_prepare_shadow_fb(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(drm_gem_simple_kms_prepare_shadow_fb);
+
+/**
+ * drm_gem_simple_kms_cleanup_shadow_fb - releases shadow framebuffers
+ * @pipe: the simple display pipe
+ * @plane_state: the plane state of type struct drm_shadow_plane_state
+ *
+ * This function implements struct drm_simple_display_pipe_funcs.cleanup_fb.
+ * It unmaps all buffer objects of the plane's framebuffer.
+ *
+ * See drm_gem_simple_kms_prepare_shadow_fb().
+ */
+void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ drm_gem_cleanup_shadow_fb(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(drm_gem_simple_kms_cleanup_shadow_fb);
+
+/**
+ * drm_gem_simple_kms_reset_shadow_plane - resets a shadow-buffered plane
+ * @pipe: the simple display pipe
+ *
+ * This function implements struct drm_simple_display_pipe_funcs.reset_plane
+ * for shadow-buffered planes.
+ */
+void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe)
+{
+ drm_gem_reset_shadow_plane(&pipe->plane);
+}
+EXPORT_SYMBOL(drm_gem_simple_kms_reset_shadow_plane);
+
+/**
+ * drm_gem_simple_kms_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
+ * @pipe: the simple display pipe
+ *
+ * This function implements struct drm_simple_display_pipe_funcs.duplicate_plane_state
+ * for shadow-buffered planes. It does not duplicate existing mappings of the shadow
+ * buffers. Mappings are maintained during the atomic commit by the plane's prepare_fb
+ * and cleanup_fb helpers.
+ *
+ * Returns:
+ * A pointer to a new plane state on success, or NULL otherwise.
+ */
+struct drm_plane_state *
+drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe)
+{
+ return drm_gem_duplicate_shadow_plane_state(&pipe->plane);
+}
+EXPORT_SYMBOL(drm_gem_simple_kms_duplicate_shadow_plane_state);
+
+/**
+ * drm_gem_simple_kms_destroy_shadow_plane_state - destroys shadow-buffered plane state
+ * @pipe: the simple display pipe
+ * @plane_state: the plane state of type struct drm_shadow_plane_state
+ *
+ * This function implements struct drm_simple_display_pipe_funcs.destroy_plane_state
+ * for shadow-buffered planes. It expects that mappings of shadow buffers
+ * have been released already.
+ */
+void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ drm_gem_destroy_shadow_plane_state(&pipe->plane, plane_state);
+}
+EXPORT_SYMBOL(drm_gem_simple_kms_destroy_shadow_plane_state);
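Taken together, the exported helpers are intended to be plugged straight into a driver's function tables. A hedged sketch, where the foo_* atomic_check/atomic_update entries stand in for driver-specific callbacks that are not part of this patch:

static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane		= drm_atomic_helper_update_plane,
	.disable_plane		= drm_atomic_helper_disable_plane,
	.destroy		= drm_plane_cleanup,
	.reset			= drm_gem_reset_shadow_plane,
	.atomic_duplicate_state	= drm_gem_duplicate_shadow_plane_state,
	.atomic_destroy_state	= drm_gem_destroy_shadow_plane_state,
};

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	.prepare_fb	= drm_gem_prepare_shadow_fb,
	.cleanup_fb	= drm_gem_cleanup_shadow_fb,
	.atomic_check	= foo_plane_atomic_check,	/* driver-specific */
	.atomic_update	= foo_plane_atomic_update,	/* driver-specific */
};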
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 109d11fb4cd4..5ed2067cebb6 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -5,13 +5,8 @@
* Copyright (C) 2017 Noralf Trønnes
*/
-#include <linux/dma-buf.h>
-#include <linux/dma-fence.h>
-#include <linux/dma-resv.h>
#include <linux/slab.h>
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
@@ -19,7 +14,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
-#include <drm/drm_simple_kms_helper.h>
#define AFBC_HEADER_SIZE 16
#define AFBC_TH_LAYOUT_ALIGNMENT 8
@@ -432,60 +426,3 @@ int drm_gem_fb_afbc_init(struct drm_device *dev,
return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_fb_afbc_init);
-
-/**
- * drm_gem_fb_prepare_fb() - Prepare a GEM backed framebuffer
- * @plane: Plane
- * @state: Plane state the fence will be attached to
- *
- * This function extracts the exclusive fence from &drm_gem_object.resv and
- * attaches it to plane state for the atomic helper to wait on. This is
- * necessary to correctly implement implicit synchronization for any buffers
- * shared as a struct &dma_buf. This function can be used as the
- * &drm_plane_helper_funcs.prepare_fb callback.
- *
- * There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple
- * gem based framebuffer drivers which have their buffers always pinned in
- * memory.
- *
- * See drm_atomic_set_fence_for_plane() for a discussion of implicit and
- * explicit fencing in atomic modeset updates.
- */
-int drm_gem_fb_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- struct drm_gem_object *obj;
- struct dma_fence *fence;
-
- if (!state->fb)
- return 0;
-
- obj = drm_gem_fb_get_obj(state->fb, 0);
- fence = dma_resv_get_excl_rcu(obj->resv);
- drm_atomic_set_fence_for_plane(state, fence);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
-
-/**
- * drm_gem_fb_simple_display_pipe_prepare_fb - prepare_fb helper for
- * &drm_simple_display_pipe
- * @pipe: Simple display pipe
- * @plane_state: Plane state
- *
- * This function uses drm_gem_fb_prepare_fb() to extract the exclusive fence
- * from &drm_gem_object.resv and attaches it to plane state for the atomic
- * helper to wait on. This is necessary to correctly implement implicit
- * synchronization for any buffers shared as a struct &dma_buf. Drivers can use
- * this as their &drm_simple_display_pipe_funcs.prepare_fb callback.
- *
- * See drm_atomic_set_fence_for_plane() for a discussion of implicit and
- * explicit fencing in atomic modeset updates.
- */
-int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state)
-{
- return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
-}
-EXPORT_SYMBOL(drm_gem_fb_simple_display_pipe_prepare_fb);
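The prepare_fb helper removed here lives on in the new GEM atomic helper; drm_gem_plane_helper_prepare_fb() is the replacement used elsewhere in this series. A hedged migration sketch for a driver that still referenced the old name (the foo_* table is hypothetical):

#include <drm/drm_gem_atomic_helper.h>

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	/* was drm_gem_fb_prepare_fb() from <drm/drm_gem_framebuffer_helper.h> */
	.prepare_fb = drm_gem_plane_helper_prepare_fb,
	/* ... other hooks unchanged ... */
};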
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 0b232a73c1b7..2b7c3a07956d 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -8,7 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
@@ -187,9 +187,8 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
struct drm_gem_vram_object *gbo;
struct drm_gem_object *gem;
struct drm_vram_mm *vmm = dev->vram_mm;
- struct ttm_bo_device *bdev;
+ struct ttm_device *bdev;
int ret;
- size_t acc_size;
if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
return ERR_PTR(-EINVAL);
@@ -216,7 +215,6 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
}
bdev = &vmm->bdev;
- acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
gbo->bo.bdev = bdev;
drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
@@ -226,8 +224,8 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
* to release gbo->bo.base and kfree gbo.
*/
ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
- &gbo->placement, pg_align, false, acc_size,
- NULL, NULL, ttm_buffer_object_destroy);
+ &gbo->placement, pg_align, false, NULL, NULL,
+ ttm_buffer_object_destroy);
if (ret)
return ERR_PTR(ret);
@@ -558,7 +556,7 @@ err_drm_gem_object_put:
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
/*
- * Helpers for struct ttm_bo_driver
+ * Helpers for struct ttm_device_funcs
*/
static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
@@ -573,9 +571,7 @@ static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
*pl = gbo->placement;
}
-static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
- bool evict,
- struct ttm_resource *new_mem)
+static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo)
{
struct ttm_buffer_object *bo = &gbo->bo;
struct drm_device *dev = bo->base.dev;
@@ -592,16 +588,8 @@ static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem)
{
- int ret;
-
- drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
- ret = ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem);
- if (ret) {
- swap(*new_mem, gbo->bo.mem);
- drm_gem_vram_bo_driver_move_notify(gbo, false, new_mem);
- swap(*new_mem, gbo->bo.mem);
- }
- return ret;
+ drm_gem_vram_bo_driver_move_notify(gbo);
+ return ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem);
}
/*
@@ -720,7 +708,7 @@ drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
goto err_drm_gem_vram_unpin;
}
- ret = drm_gem_fb_prepare_fb(plane, new_state);
+ ret = drm_gem_plane_helper_prepare_fb(plane, new_state);
if (ret)
goto err_drm_gem_vram_unpin;
@@ -901,7 +889,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
* TTM TT
*/
-static void bo_driver_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt)
+static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
ttm_tt_destroy_common(bdev, tt);
ttm_tt_fini(tt);
@@ -957,7 +945,7 @@ static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo)
gbo = drm_gem_vram_of_bo(bo);
- drm_gem_vram_bo_driver_move_notify(gbo, false, NULL);
+ drm_gem_vram_bo_driver_move_notify(gbo);
}
static int bo_driver_move(struct ttm_buffer_object *bo,
@@ -973,7 +961,7 @@ static int bo_driver_move(struct ttm_buffer_object *bo,
return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
}
-static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
+static int bo_driver_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
@@ -993,7 +981,7 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
return 0;
}
-static struct ttm_bo_driver bo_driver = {
+static struct ttm_device_funcs bo_driver = {
.ttm_tt_create = bo_driver_ttm_tt_create,
.ttm_tt_destroy = bo_driver_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
@@ -1044,7 +1032,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
vmm->vram_base = vram_base;
vmm->vram_size = vram_size;
- ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, dev->dev,
+ ret = ttm_device_init(&vmm->bdev, &bo_driver, dev->dev,
dev->anon_inode->i_mapping,
dev->vma_offset_manager,
false, true);
@@ -1062,7 +1050,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
{
ttm_range_man_fini(&vmm->bdev, TTM_PL_VRAM);
- ttm_bo_device_release(&vmm->bdev);
+ ttm_device_fini(&vmm->bdev);
}
/*
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index dc734d4828a1..33390f02f5eb 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -302,12 +302,8 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_stats32_t __user *argp = (void __user *)arg;
- int err;
-
- err = drm_ioctl_kernel(file, drm_noop, NULL, 0);
- if (err)
- return err;
+ /* getstats is defunct, just clear */
if (clear_user(argp, sizeof(drm_stats32_t)))
return -EFAULT;
return 0;
@@ -820,13 +816,8 @@ typedef struct drm_update_draw32 {
static int compat_drm_update_draw(struct file *file, unsigned int cmd,
unsigned long arg)
{
- drm_update_draw32_t update32;
-
- if (copy_from_user(&update32, (void __user *)arg, sizeof(update32)))
- return -EFAULT;
-
- return drm_ioctl_kernel(file, drm_noop, NULL,
- DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
+ /* update_draw is defunct */
+ return 0;
}
#endif
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 230c4fd7131c..43a9b739bba7 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -203,7 +203,6 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem);
struct dma_buf_attachment *import_attach = gem->import_attach;
- struct drm_format_name_buf format_name;
void *src = cma_obj->vaddr;
int ret = 0;
@@ -225,8 +224,8 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
drm_fb_xrgb8888_to_rgb565(dst, src, fb, clip, swap);
break;
default:
- drm_err_once(fb->dev, "Format is not supported: %s\n",
- drm_get_format_name(fb->format->format, &format_name));
+ drm_err_once(fb->dev, "Format is not supported: %p4cc\n",
+ &fb->format->format);
return -EINVAL;
}
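The %p4cc specifier used above takes a pointer to the 32-bit FourCC value and prints it as a four-character format code, which is what the printk-formats.rst update in this series documents. A minimal, driver-independent sketch:

#include <linux/printk.h>

#include <drm/drm_fourcc.h>

static void report_unsupported_format(u32 fourcc)
{
	/* %p4cc expects the address of the FourCC, not the value itself. */
	pr_warn("format not supported: %p4cc\n", &fourcc);
}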
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 58f5dc2f6dd5..f6bdec7fa925 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -84,6 +84,13 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data onegx1_pro = {
+ .width = 1200,
+ .height = 1920,
+ .bios_dates = (const char * const []){ "12/17/2020", NULL },
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
.width = 720,
.height = 1280,
@@ -211,6 +218,13 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* OneGX1 Pro */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SYSTEM_PRODUCT_NAME"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
+ },
+ .driver_data = (void *)&onegx1_pro,
}, { /* VIOS LTH17 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 338650abd267..0dd43882fe7c 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -50,10 +50,8 @@
* &struct drm_plane (possibly as part of a larger structure) and registers it
* with a call to drm_universal_plane_init().
*
- * The type of a plane is exposed in the immutable "type" enumeration property,
- * which has one of the following values: "Overlay", "Primary", "Cursor" (see
- * enum drm_plane_type). A plane can be compatible with multiple CRTCs, see
- * &drm_plane.possible_crtcs.
+ * Each plane has a type, see enum drm_plane_type. A plane can be compatible
+ * with multiple CRTCs, see &drm_plane.possible_crtcs.
*
* Each CRTC must have a unique primary plane userspace can attach to enable
* the CRTC. In other words, userspace must be able to attach a different
@@ -73,6 +71,58 @@
*
* DRM planes have a few standardized properties:
*
+ * type:
+ * Immutable property describing the type of the plane.
+ *
+ * For user-space which has enabled the &DRM_CLIENT_CAP_ATOMIC capability,
+ * the plane type is just a hint and is mostly superseded by atomic
+ * test-only commits. The type hint can still be used to come up more
+ * easily with a plane configuration accepted by the driver.
+ *
+ * The value of this property can be one of the following:
+ *
+ * "Primary":
+ * To light up a CRTC, attaching a primary plane is the most likely to
+ * work if it covers the whole CRTC and doesn't have scaling or
+ * cropping set up.
+ *
+ * Drivers may support more features for the primary plane; user-space
+ * can find out with test-only atomic commits.
+ *
+ * Some primary planes are implicitly used by the kernel in the legacy
+ * IOCTLs &DRM_IOCTL_MODE_SETCRTC and &DRM_IOCTL_MODE_PAGE_FLIP.
+ * Therefore user-space must not mix explicit usage of any primary
+ * plane (e.g. through an atomic commit) with these legacy IOCTLs.
+ *
+ * "Cursor":
+ * To enable this plane, using a framebuffer configured without scaling
+ * or cropping and with the following properties is the most likely to
+ * work:
+ *
+ * - If the driver provides the capabilities &DRM_CAP_CURSOR_WIDTH and
+ * &DRM_CAP_CURSOR_HEIGHT, create the framebuffer with this size.
+ * Otherwise, create a framebuffer with the size 64x64.
+ * - If the driver doesn't support modifiers, create a framebuffer with
+ * a linear layout. Otherwise, use the IN_FORMATS plane property.
+ *
+ * Drivers may support more features for the cursor plane; user-space
+ * can find out with test-only atomic commits.
+ *
+ * Some cursor planes are implicitly used by the kernel in the legacy
+ * IOCTLs &DRM_IOCTL_MODE_CURSOR and &DRM_IOCTL_MODE_CURSOR2.
+ * Therefore user-space must not mix explicit usage of any cursor
+ * plane (e.g. through an atomic commit) with these legacy IOCTLs.
+ *
+ * Some drivers may support cursors even if no cursor plane is exposed.
+ * In this case, the legacy cursor IOCTLs can be used to configure the
+ * cursor.
+ *
+ * "Overlay":
+ * Neither primary nor cursor.
+ *
+ * Overlay planes are the only planes exposed when the
+ * &DRM_CLIENT_CAP_UNIVERSAL_PLANES capability is disabled.
+ *
* IN_FORMATS:
* Blob property which contains the set of buffer format and modifier
* pairs supported by this plane. The blob is a struct
@@ -719,12 +769,8 @@ static int __setplane_check(struct drm_plane *plane,
ret = drm_plane_check_pixel_format(plane, fb->format->format,
fb->modifier);
if (ret) {
- struct drm_format_name_buf format_name;
-
- DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n",
- drm_get_format_name(fb->format->format,
- &format_name),
- fb->modifier);
+ DRM_DEBUG_KMS("Invalid pixel format %p4cc, modifier 0x%llx\n",
+ &fb->format->format, fb->modifier);
return ret;
}
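For completeness, a hedged user-space sketch of reading the documented "type" property through libdrm; error handling is minimal and fd is assumed to be an open DRM device node:

#include <stdio.h>
#include <string.h>

#include <xf86drm.h>
#include <xf86drmMode.h>

static void print_plane_type(int fd, uint32_t plane_id)
{
	drmModeObjectProperties *props =
		drmModeObjectGetProperties(fd, plane_id, DRM_MODE_OBJECT_PLANE);
	uint32_t i;
	int j;

	if (!props)
		return;

	for (i = 0; i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && !strcmp(prop->name, "type")) {
			/* Map the enum value to its name: Primary/Cursor/Overlay. */
			for (j = 0; j < prop->count_enums; j++) {
				if (prop->enums[j].value == props->prop_values[i])
					printf("plane %u: %s\n", plane_id,
					       prop->enums[j].name);
			}
		}
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
}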
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 6ce8f5cd1eb5..0b095a313c44 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -177,14 +177,16 @@ static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = {
};
static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *plane_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_simple_display_pipe *pipe;
struct drm_crtc_state *crtc_state;
int ret;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
- crtc_state = drm_atomic_get_new_crtc_state(plane_state->state,
+ crtc_state = drm_atomic_get_new_crtc_state(state,
&pipe->crtc);
ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
@@ -204,8 +206,10 @@ static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
}
static void drm_simple_kms_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_pstate)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_pstate = drm_atomic_get_old_plane_state(state,
+ plane);
struct drm_simple_display_pipe *pipe;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
@@ -253,13 +257,47 @@ static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
.atomic_update = drm_simple_kms_plane_atomic_update,
};
+static void drm_simple_kms_plane_reset(struct drm_plane *plane)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(plane, struct drm_simple_display_pipe, plane);
+ if (!pipe->funcs || !pipe->funcs->reset_plane)
+ return drm_atomic_helper_plane_reset(plane);
+
+ return pipe->funcs->reset_plane(pipe);
+}
+
+static struct drm_plane_state *drm_simple_kms_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(plane, struct drm_simple_display_pipe, plane);
+ if (!pipe->funcs || !pipe->funcs->duplicate_plane_state)
+ return drm_atomic_helper_plane_duplicate_state(plane);
+
+ return pipe->funcs->duplicate_plane_state(pipe);
+}
+
+static void drm_simple_kms_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_simple_display_pipe *pipe;
+
+ pipe = container_of(plane, struct drm_simple_display_pipe, plane);
+ if (!pipe->funcs || !pipe->funcs->destroy_plane_state)
+ drm_atomic_helper_plane_destroy_state(plane, state);
+ else
+ pipe->funcs->destroy_plane_state(pipe, state);
+}
+
static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .reset = drm_simple_kms_plane_reset,
+ .atomic_duplicate_state = drm_simple_kms_plane_duplicate_state,
+ .atomic_destroy_state = drm_simple_kms_plane_destroy_state,
.format_mod_supported = drm_simple_kms_format_mod_supported,
};
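With the new reset_plane, duplicate_plane_state and destroy_plane_state hooks, a simple-pipe driver can hand its plane state over to the shadow-plane helpers added earlier in this series. A hedged sketch; the foo_* callbacks are hypothetical driver code:

static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
	.enable			= foo_pipe_enable,	/* driver-specific */
	.disable		= foo_pipe_disable,	/* driver-specific */
	.update			= foo_pipe_update,	/* driver-specific */
	.prepare_fb		= drm_gem_simple_kms_prepare_shadow_fb,
	.cleanup_fb		= drm_gem_simple_kms_cleanup_shadow_fb,
	.reset_plane		= drm_gem_simple_kms_reset_shadow_plane,
	.duplicate_plane_state	= drm_gem_simple_kms_duplicate_shadow_plane_state,
	.destroy_plane_state	= drm_gem_simple_kms_destroy_shadow_plane_state,
};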
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 349146049849..6231a8214c25 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -387,6 +387,15 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
if (!syncobj)
return -ENOENT;
+ /* Waiting for userspace with locks held is illegal because that can
+ * trivially deadlock with page faults, for example. Make lockdep
+ * complain about it early on.
+ */
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ might_sleep();
+ lockdep_assert_none_held_once();
+ }
+
*fence = drm_syncobj_fence_get(syncobj);
if (*fence) {
@@ -942,6 +951,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
uint64_t *points;
uint32_t signaled_count, i;
+ if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
+ lockdep_assert_none_held_once();
+
points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
if (points == NULL)
return -ENOMEM;
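The new assertions encode a locking rule for callers: a wait that may block on a future submission must not run under any lock. A hedged sketch of a caller that satisfies the rule (foo_* is hypothetical):

static int foo_wait_for_submitted_fence(struct drm_file *file, u32 handle,
					struct dma_fence **fence)
{
	/* Must be called with no locks held; may block on user-space. */
	return drm_syncobj_find_fence(file, handle, 0,
				      DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT,
				      fence);
}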
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 893165eeddf3..2bd989688eae 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -1470,20 +1470,7 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_vblank_on);
-/**
- * drm_vblank_restore - estimate missed vblanks and update vblank count.
- * @dev: DRM device
- * @pipe: CRTC index
- *
- * Power manamement features can cause frame counter resets between vblank
- * disable and enable. Drivers can use this function in their
- * &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
- * the last &drm_crtc_funcs.disable_vblank using timestamps and update the
- * vblank counter.
- *
- * This function is the legacy version of drm_crtc_vblank_restore().
- */
-void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
+static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
{
ktime_t t_vblank;
struct drm_vblank_crtc *vblank;
@@ -1519,7 +1506,6 @@ void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
diff, diff_ns, framedur_ns, cur_vblank - vblank->last);
store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
}
-EXPORT_SYMBOL(drm_vblank_restore);
/**
* drm_crtc_vblank_restore - estimate missed vblanks and update vblank count.
@@ -1530,9 +1516,18 @@ EXPORT_SYMBOL(drm_vblank_restore);
* &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
* the last &drm_crtc_funcs.disable_vblank using timestamps and update the
* vblank counter.
+ *
+ * Note that drivers must have race-free high-precision timestamping support,
+ * i.e. &drm_crtc_funcs.get_vblank_timestamp must be hooked up and
+ * &drm_driver.vblank_disable_immediate must be set to indicate the
+ * time-stamping functions are race-free against vblank hardware counter
+ * increments.
*/
void drm_crtc_vblank_restore(struct drm_crtc *crtc)
{
+ WARN_ON_ONCE(!crtc->funcs->get_vblank_timestamp);
+ WARN_ON_ONCE(!crtc->dev->vblank_disable_immediate);
+
drm_vblank_restore(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_restore);
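The added warnings spell out the preconditions for restoring the counter. A hedged sketch of an enable_vblank hook that meets them, assuming the driver hooks up &drm_crtc_funcs.get_vblank_timestamp and sets &drm_driver.vblank_disable_immediate (foo_* is hypothetical):

static int foo_enable_vblank(struct drm_crtc *crtc)
{
	/* Account for frame-counter resets that occurred while disabled. */
	drm_crtc_vblank_restore(crtc);

	foo_unmask_vblank_irq(crtc);	/* driver-specific hardware enable */

	return 0;
}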
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index cd46c882269c..19826e504efc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -82,7 +82,8 @@ static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
return fence;
}
-static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
+static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
+ *sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
struct etnaviv_gpu *gpu = submit->gpu;
@@ -120,9 +121,13 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
drm_sched_resubmit_jobs(&gpu->sched);
+ drm_sched_start(&gpu->sched, true);
+ return DRM_GPU_SCHED_STAT_NOMINAL;
+
out_no_timeout:
/* restart scheduler after GPU is usable again */
drm_sched_start(&gpu->sched, true);
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
@@ -185,7 +190,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
- msecs_to_jiffies(500), dev_name(gpu->dev));
+ msecs_to_jiffies(500), NULL, dev_name(gpu->dev));
if (ret)
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index b29afced7374..df76bdee7dca 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -228,14 +228,16 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
}
static int exynos_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
struct exynos_drm_plane_state *exynos_state =
- to_exynos_plane_state(state);
+ to_exynos_plane_state(new_plane_state);
int ret = 0;
- if (!state->crtc || !state->fb)
+ if (!new_plane_state->crtc || !new_plane_state->fb)
return 0;
/* translate state into exynos_state */
@@ -250,13 +252,14 @@ static int exynos_plane_atomic_check(struct drm_plane *plane,
}
static void exynos_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *state = plane->state;
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(state->crtc);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(new_state->crtc);
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
- if (!state->crtc)
+ if (!new_state->crtc)
return;
if (exynos_crtc->ops->update_plane)
@@ -264,8 +267,9 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
}
static void exynos_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(old_state->crtc);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 3c6d9f3913d5..8fe953d6e0a9 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -7,6 +7,7 @@
#include <linux/regmap.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
@@ -33,11 +34,13 @@ static int fsl_dcu_drm_plane_index(struct drm_plane *plane)
}
static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct drm_framebuffer *fb = state->fb;
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_framebuffer *fb = new_plane_state->fb;
- if (!state->fb || !state->crtc)
+ if (!new_plane_state->fb || !new_plane_state->crtc)
return 0;
switch (fb->format->format) {
@@ -57,7 +60,7 @@ static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane,
}
static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
unsigned int value;
@@ -73,11 +76,12 @@ static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
}
static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_framebuffer *fb = plane->state->fb;
struct drm_gem_cma_object *gem;
unsigned int alpha = DCU_LAYER_AB_NONE, bpp;
@@ -125,11 +129,11 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
}
regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1),
- DCU_LAYER_HEIGHT(state->crtc_h) |
- DCU_LAYER_WIDTH(state->crtc_w));
+ DCU_LAYER_HEIGHT(new_state->crtc_h) |
+ DCU_LAYER_WIDTH(new_state->crtc_w));
regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2),
- DCU_LAYER_POSY(state->crtc_y) |
- DCU_LAYER_POSX(state->crtc_x));
+ DCU_LAYER_POSY(new_state->crtc_y) |
+ DCU_LAYER_POSX(new_state->crtc_x));
regmap_write(fsl_dev->regmap,
DCU_CTRLDESCLN(index, 3), gem->paddr);
regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4),
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index ec395658a43f..0cff20265f97 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -9,12 +9,5 @@ config DRM_GMA500
select INPUT if ACPI
help
Say yes for an experimental 2D KMS framebuffer driver for the
- Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
- devices.
-
-config DRM_GMA600
- bool "Intel GMA600 support (Experimental)"
- depends on DRM_GMA500
- help
- Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
- platforms with LVDS ports. MIPI is not currently supported.
+ Intel GMA500 (Poulsbo), Intel GMA600 (Moorestown/Oak Trail) and
+ Intel GMA3600/3650 (Cedar Trail).
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 884ab1f9063e..63012bf2485a 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -4,9 +4,7 @@
#
gma500_gfx-y += \
- accel_2d.o \
backlight.o \
- blitter.o \
cdv_device.o \
cdv_intel_crt.o \
cdv_intel_display.o \
@@ -23,6 +21,12 @@ gma500_gfx-y += \
intel_i2c.o \
mid_bios.o \
mmu.o \
+ oaktrail_device.o \
+ oaktrail_crtc.o \
+ oaktrail_hdmi.o \
+ oaktrail_hdmi_i2c.o \
+ oaktrail_lvds.o \
+ oaktrail_lvds_i2c.o \
power.o \
psb_device.o \
psb_drv.o \
@@ -33,13 +37,6 @@ gma500_gfx-y += \
psb_lid.o \
psb_irq.o
-gma500_gfx-$(CONFIG_ACPI) += opregion.o \
-
-gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
- oaktrail_crtc.o \
- oaktrail_lvds.o \
- oaktrail_lvds_i2c.o \
- oaktrail_hdmi.o \
- oaktrail_hdmi_i2c.o
+gma500_gfx-$(CONFIG_ACPI) += opregion.o
obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
deleted file mode 100644
index 437bbb6af9e6..000000000000
--- a/drivers/gpu/drm/gma500/accel_2d.c
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/**************************************************************************
- * Copyright (c) 2007-2011, Intel Corporation.
- * All Rights Reserved.
- *
- * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
- * develop this driver.
- *
- **************************************************************************/
-
-#include <linux/console.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-
-#include <drm/drm.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-
-#include "psb_drv.h"
-#include "psb_reg.h"
-
-/**
- * psb_spank - reset the 2D engine
- * @dev_priv: our PSB DRM device
- *
- * Soft reset the graphics engine and then reload the necessary registers.
- * We use this at initialisation time but it will become relevant for
- * accelerated X later
- */
-void psb_spank(struct drm_psb_private *dev_priv)
-{
- PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
- _PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
- _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
- _PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
- PSB_RSGX32(PSB_CR_SOFT_RESET);
-
- msleep(1);
-
- PSB_WSGX32(0, PSB_CR_SOFT_RESET);
- wmb();
- PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
- PSB_CR_BIF_CTRL);
- wmb();
- (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
-
- msleep(1);
- PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
- PSB_CR_BIF_CTRL);
- (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
- PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
-}
diff --git a/drivers/gpu/drm/gma500/blitter.c b/drivers/gpu/drm/gma500/blitter.c
deleted file mode 100644
index cb2504a4a15f..000000000000
--- a/drivers/gpu/drm/gma500/blitter.c
+++ /dev/null
@@ -1,43 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014, Patrik Jakobsson
- * All Rights Reserved.
- *
- * Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
- */
-
-#include "psb_drv.h"
-
-#include "blitter.h"
-#include "psb_reg.h"
-
-/* Wait for the blitter to be completely idle */
-int gma_blt_wait_idle(struct drm_psb_private *dev_priv)
-{
- unsigned long stop = jiffies + HZ;
- int busy = 1;
-
- /* NOP for Cedarview */
- if (IS_CDV(dev_priv->dev))
- return 0;
-
- /* First do a quick check */
- if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
- ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
- return 0;
-
- do {
- busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
- } while (busy && !time_after_eq(jiffies, stop));
-
- if (busy)
- return -EBUSY;
-
- do {
- busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
- _PSB_C2B_STATUS_BUSY) != 0);
- } while (busy && !time_after_eq(jiffies, stop));
-
- /* If still busy, we probably have a hang */
- return (busy) ? -EBUSY : 0;
-}
diff --git a/drivers/gpu/drm/gma500/blitter.h b/drivers/gpu/drm/gma500/blitter.h
deleted file mode 100644
index 8d67dabd9ba3..000000000000
--- a/drivers/gpu/drm/gma500/blitter.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014, Patrik Jakobsson
- * All Rights Reserved.
- *
- * Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
- */
-
-#ifndef __BLITTER_H
-#define __BLITTER_H
-
-struct drm_psb_private;
-
-extern int gma_blt_wait_idle(struct drm_psb_private *dev_priv);
-
-#endif
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 19e055dbd4c2..1342e7fb382f 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -603,7 +603,7 @@ const struct psb_ops cdv_chip_ops = {
.errata = cdv_errata,
.crtc_helper = &cdv_intel_helper_funcs,
- .crtc_funcs = &cdv_intel_crtc_funcs,
+ .crtc_funcs = &gma_intel_crtc_funcs,
.clock_funcs = &cdv_clock_funcs,
.output_init = cdv_output_init,
diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
index 37e4bdc84c03..504d717385cd 100644
--- a/drivers/gpu/drm/gma500/cdv_device.h
+++ b/drivers/gpu/drm/gma500/cdv_device.h
@@ -8,7 +8,6 @@ struct drm_device;
struct psb_intel_mode_device;
extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
-extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
extern const struct gma_clock_funcs cdv_clock_funcs;
extern void cdv_intel_crt_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index c48c9d322dfb..4a9bb4994a26 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -248,8 +248,6 @@ void cdv_intel_crt_init(struct drm_device *dev,
struct drm_connector *connector;
struct drm_encoder *encoder;
- u32 i2c_reg;
-
gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
if (!gma_encoder)
return;
@@ -269,24 +267,13 @@ void cdv_intel_crt_init(struct drm_device *dev,
gma_connector_attach_encoder(gma_connector, gma_encoder);
/* Set up the DDC bus. */
- i2c_reg = GPIOA;
- /* Remove the following code for CDV */
- /*
- if (dev_priv->crt_ddc_bus != 0)
- i2c_reg = dev_priv->crt_ddc_bus;
- }*/
- gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
- i2c_reg, "CRTDDC_A");
+ gma_encoder->ddc_bus = psb_intel_i2c_create(dev, GPIOA, "CRTDDC_A");
if (!gma_encoder->ddc_bus) {
dev_printk(KERN_ERR, dev->dev, "DDC bus registration failed.\n");
goto failed_ddc;
}
gma_encoder->type = INTEL_OUTPUT_ANALOG;
- /*
- psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
- psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
- */
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 5d3302249779..c3a9f6b3c848 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -582,7 +582,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
struct gma_clock_t clock;
u32 dpll = 0, dspcntr, pipeconf;
bool ok;
- bool is_lvds = false, is_tv = false;
+ bool is_lvds = false;
bool is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
@@ -603,9 +603,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
- case INTEL_OUTPUT_TVOUT:
- is_tv = true;
- break;
case INTEL_OUTPUT_ANALOG:
case INTEL_OUTPUT_HDMI:
break;
@@ -660,12 +657,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
}
dpll = DPLL_VGA_MODE_DIS;
- if (is_tv) {
- /* XXX: just matching BIOS for now */
-/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
- dpll |= 3;
- }
-/* dpll |= PLL_REF_INPUT_DREFCLK; */
if (is_dp || is_edp) {
cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@ -970,18 +961,6 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
.disable = gma_crtc_disable,
};
-const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
- .cursor_set = gma_crtc_cursor_set,
- .cursor_move = gma_crtc_cursor_move,
- .gamma_set = gma_crtc_gamma_set,
- .set_config = gma_crtc_set_config,
- .destroy = gma_crtc_destroy,
- .page_flip = gma_crtc_page_flip,
- .enable_vblank = psb_enable_vblank,
- .disable_vblank = psb_disable_vblank,
- .get_vblank_counter = psb_get_vblank_counter,
-};
-
const struct gma_clock_funcs cdv_clock_funcs = {
.clock = cdv_intel_clock,
.limit = cdv_intel_limit,
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index e884750bc123..df9b611b856a 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -11,7 +11,6 @@
#include <asm/set_memory.h>
-#include "blitter.h"
#include "psb_drv.h"
@@ -229,18 +228,9 @@ void psb_gtt_unpin(struct gtt_range *gt)
struct drm_device *dev = gt->gem.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
u32 gpu_base = dev_priv->gtt.gatt_start;
- int ret;
- /* While holding the gtt_mutex no new blits can be initiated */
mutex_lock(&dev_priv->gtt_mutex);
- /* Wait for any possible usage of the memory to be finished */
- ret = gma_blt_wait_idle(dev_priv);
- if (ret) {
- DRM_ERROR("Failed to idle the blitter, unpin failed!");
- goto out;
- }
-
WARN_ON(!gt->in_gart);
gt->in_gart--;
@@ -251,7 +241,6 @@ void psb_gtt_unpin(struct gtt_range *gt)
psb_gtt_detach_pages(gt);
}
-out:
mutex_unlock(&dev_priv->gtt_mutex);
}
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index 370bd6451bd9..eb0924473a21 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -44,13 +44,13 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W && !(in_atomic() || in_dbg_master())) msleep(W); \
+ if (W && !(in_dbg_master())) \
+ msleep(W); \
} \
ret__; \
})
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
-#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
#define GMBUS_REG_READ(reg) ioread32(dev_priv->gmbus_reg + (reg))
#define GMBUS_REG_WRITE(reg, val) iowrite32((val), dev_priv->gmbus_reg + (reg))
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index aff0534831ef..454156fcbec7 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -545,7 +545,7 @@ const struct psb_ops oaktrail_chip_ops = {
.chip_setup = oaktrail_chip_setup,
.chip_teardown = oaktrail_teardown,
.crtc_helper = &oaktrail_helper_funcs,
- .crtc_funcs = &psb_intel_crtc_funcs,
+ .crtc_funcs = &gma_intel_crtc_funcs,
.output_init = oaktrail_output_init,
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 2d21f8ec595f..951725a0f7a3 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -329,7 +329,7 @@ const struct psb_ops psb_chip_ops = {
.chip_teardown = psb_chip_teardown,
.crtc_helper = &psb_intel_helper_funcs,
- .crtc_funcs = &psb_intel_crtc_funcs,
+ .crtc_funcs = &gma_intel_crtc_funcs,
.clock_funcs = &psb_clock_funcs,
.output_init = psb_output_init,
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 0bcab065242c..650af221c916 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -12,6 +12,7 @@
#include <linux/notifier.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
+#include <linux/delay.h>
#include <asm/set_memory.h>
@@ -54,7 +55,7 @@ static const struct pci_device_id pciidlist[] = {
/* Poulsbo */
{ 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
{ 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
-#if defined(CONFIG_DRM_GMA600)
+ /* Oak Trail */
{ 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
@@ -64,8 +65,7 @@ static const struct pci_device_id pciidlist[] = {
{ 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
-#endif
- /* Cedartrail */
+ /* Cedar Trail */
{ 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
@@ -92,6 +92,36 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static const struct drm_ioctl_desc psb_ioctls[] = {
};
+/**
+ * psb_spank - reset the 2D engine
+ * @dev_priv: our PSB DRM device
+ *
+ * Soft reset the graphics engine and then reload the necessary registers.
+ */
+void psb_spank(struct drm_psb_private *dev_priv)
+{
+ PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
+ _PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
+ _PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
+ _PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
+ PSB_RSGX32(PSB_CR_SOFT_RESET);
+
+ msleep(1);
+
+ PSB_WSGX32(0, PSB_CR_SOFT_RESET);
+ wmb();
+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
+ PSB_CR_BIF_CTRL);
+ wmb();
+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+
+ msleep(1);
+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
+ PSB_CR_BIF_CTRL);
+ (void) PSB_RSGX32(PSB_CR_BIF_CTRL);
+ PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+}
+
static int psb_do_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 694495070c65..49afa577d442 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -625,13 +625,9 @@ static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
/* psb_irq.c */
extern irqreturn_t psb_irq_handler(int irq, void *arg);
-extern int psb_irq_enable_dpst(struct drm_device *dev);
-extern int psb_irq_disable_dpst(struct drm_device *dev);
extern void psb_irq_preinstall(struct drm_device *dev);
extern int psb_irq_postinstall(struct drm_device *dev);
extern void psb_irq_uninstall(struct drm_device *dev);
-extern void psb_irq_turn_on_dpst(struct drm_device *dev);
-extern void psb_irq_turn_off_dpst(struct drm_device *dev);
extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
@@ -679,7 +675,7 @@ extern void oaktrail_lvds_init(struct drm_device *dev,
/* psb_intel_display.c */
extern const struct drm_crtc_helper_funcs psb_intel_helper_funcs;
-extern const struct drm_crtc_funcs psb_intel_crtc_funcs;
+extern const struct drm_crtc_funcs gma_intel_crtc_funcs;
/* psb_intel_lvds.c */
extern const struct drm_connector_helper_funcs
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 9c3cb1b80bbd..359606429316 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -426,7 +426,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
.disable = gma_crtc_disable,
};
-const struct drm_crtc_funcs psb_intel_crtc_funcs = {
+const struct drm_crtc_funcs gma_intel_crtc_funcs = {
.cursor_set = gma_crtc_cursor_set,
.cursor_move = gma_crtc_cursor_move,
.gamma_set = gma_crtc_gamma_set,
diff --git a/drivers/gpu/drm/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index 364ea8f06f9c..ced7b433befb 100644
--- a/drivers/gpu/drm/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -550,38 +550,6 @@
#define HISTOGRAM_INT_CTRL_CLEAR (1UL << 30)
#define DPST_YUV_LUMA_MODE 0
-struct dpst_ie_histogram_control {
- union {
- uint32_t data;
- struct {
- uint32_t bin_reg_index:7;
- uint32_t reserved:4;
- uint32_t bin_reg_func_select:1;
- uint32_t sync_to_phase_in:1;
- uint32_t alt_enhancement_mode:2;
- uint32_t reserved1:1;
- uint32_t sync_to_phase_in_count:8;
- uint32_t histogram_mode_select:1;
- uint32_t reserved2:4;
- uint32_t ie_pipe_assignment:1;
- uint32_t ie_mode_table_enabled:1;
- uint32_t ie_histogram_enable:1;
- };
- };
-};
-
-struct dpst_guardband {
- union {
- uint32_t data;
- struct {
- uint32_t guardband:22;
- uint32_t guardband_interrupt_delay:8;
- uint32_t interrupt_status:1;
- uint32_t interrupt_enable:1;
- };
- };
-};
-
#define PIPEAFRAMEHIGH 0x70040
#define PIPEAFRAMEPIXEL 0x70044
#define PIPEBFRAMEHIGH 0x71040
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index ae9b100e640b..104009e78487 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -101,30 +101,6 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
}
}
-static void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
-{
- if (gma_power_begin(dev_priv->dev, false)) {
- u32 pipe_event = mid_pipe_event(pipe);
- dev_priv->vdc_irq_mask |= pipe_event;
- PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
- PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
- gma_power_end(dev_priv->dev);
- }
-}
-
-static void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
-{
- if (dev_priv->pipestat[pipe] == 0) {
- if (gma_power_begin(dev_priv->dev, false)) {
- u32 pipe_event = mid_pipe_event(pipe);
- dev_priv->vdc_irq_mask &= ~pipe_event;
- PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
- PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
- gma_power_end(dev_priv->dev);
- }
- }
-}
-
/*
* Display controller interrupt handler for pipe event.
*/
@@ -392,92 +368,6 @@ void psb_irq_uninstall(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
-void psb_irq_turn_on_dpst(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
- u32 hist_reg;
- u32 pwm_reg;
-
- if (gma_power_begin(dev, false)) {
- PSB_WVDC32(1 << 31, HISTOGRAM_LOGIC_CONTROL);
- hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
- PSB_WVDC32(1 << 31, HISTOGRAM_INT_CONTROL);
- hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
-
- PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
- pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
- PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE
- | PWM_PHASEIN_INT_ENABLE,
- PWM_CONTROL_LOGIC);
- pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
-
- psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
-
- hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
- PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,
- HISTOGRAM_INT_CONTROL);
- pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
- PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
- PWM_CONTROL_LOGIC);
-
- gma_power_end(dev);
- }
-}
-
-int psb_irq_enable_dpst(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-
- /* enable DPST */
- mid_enable_pipe_event(dev_priv, 0);
- psb_irq_turn_on_dpst(dev);
-
- spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
- return 0;
-}
-
-void psb_irq_turn_off_dpst(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
- u32 pwm_reg;
-
- if (gma_power_begin(dev, false)) {
- PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
- PSB_RVDC32(HISTOGRAM_INT_CONTROL);
-
- psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
-
- pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
- PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
- PWM_CONTROL_LOGIC);
- pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
-
- gma_power_end(dev);
- }
-}
-
-int psb_irq_disable_dpst(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
-
- mid_disable_pipe_event(dev_priv, 0);
- psb_irq_turn_off_dpst(dev);
-
- spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
-
- return 0;
-}
-
/*
* It is used to enable VBLANK interrupt
*/
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index 1b577fa7010a..17c9b0b62471 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -23,10 +23,6 @@ int psb_irq_postinstall(struct drm_device *dev);
void psb_irq_uninstall(struct drm_device *dev);
irqreturn_t psb_irq_handler(int irq, void *arg);
-int psb_irq_enable_dpst(struct drm_device *dev);
-int psb_irq_disable_dpst(struct drm_device *dev);
-void psb_irq_turn_on_dpst(struct drm_device *dev);
-void psb_irq_turn_off_dpst(struct drm_device *dev);
int psb_enable_vblank(struct drm_crtc *crtc);
void psb_disable_vblank(struct drm_crtc *crtc);
u32 psb_get_vblank_counter(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 096eea985b6f..fa8da0ef707e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -53,27 +53,29 @@ static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
};
static int hibmc_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct drm_framebuffer *fb = state->fb;
- struct drm_crtc *crtc = state->crtc;
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_framebuffer *fb = new_plane_state->fb;
+ struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
- u32 src_w = state->src_w >> 16;
- u32 src_h = state->src_h >> 16;
+ u32 src_w = new_plane_state->src_w >> 16;
+ u32 src_h = new_plane_state->src_h >> 16;
if (!crtc || !fb)
return 0;
- crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- if (src_w != state->crtc_w || src_h != state->crtc_h) {
+ if (src_w != new_plane_state->crtc_w || src_h != new_plane_state->crtc_h) {
drm_dbg_atomic(plane->dev, "scale not support\n");
return -EINVAL;
}
- if (state->crtc_x < 0 || state->crtc_y < 0) {
+ if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0) {
drm_dbg_atomic(plane->dev, "crtc_x/y of drm_plane state is invalid\n");
return -EINVAL;
}
@@ -81,15 +83,15 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
if (!crtc_state->enable)
return 0;
- if (state->crtc_x + state->crtc_w >
+ if (new_plane_state->crtc_x + new_plane_state->crtc_w >
crtc_state->adjusted_mode.hdisplay ||
- state->crtc_y + state->crtc_h >
+ new_plane_state->crtc_y + new_plane_state->crtc_h >
crtc_state->adjusted_mode.vdisplay) {
drm_dbg_atomic(plane->dev, "visible portion of plane is invalid\n");
return -EINVAL;
}
- if (state->fb->pitches[0] % 128 != 0) {
+ if (new_plane_state->fb->pitches[0] % 128 != 0) {
drm_dbg_atomic(plane->dev, "wrong stride with 128-byte aligned\n");
return -EINVAL;
}
@@ -97,19 +99,20 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
}
static void hibmc_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
u32 reg;
s64 gpu_addr = 0;
u32 line_l;
struct hibmc_drm_private *priv = to_hibmc_drm_private(plane->dev);
struct drm_gem_vram_object *gbo;
- if (!state->fb)
+ if (!new_state->fb)
return;
- gbo = drm_gem_vram_of_gem(state->fb->obj[0]);
+ gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
gpu_addr = drm_gem_vram_offset(gbo);
if (WARN_ON_ONCE(gpu_addr < 0))
@@ -117,9 +120,9 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
- reg = state->fb->width * (state->fb->format->cpp[0]);
+ reg = new_state->fb->width * (new_state->fb->format->cpp[0]);
- line_l = state->fb->pitches[0];
+ line_l = new_state->fb->pitches[0];
writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) |
HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l),
priv->mmio + HIBMC_CRT_FB_WIDTH);
@@ -128,7 +131,7 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
reg &= ~HIBMC_CRT_DISP_CTL_FORMAT_MASK;
reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_FORMAT,
- state->fb->format->cpp[0] * 8 / 16);
+ new_state->fb->format->cpp[0] * 8 / 16);
writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
}
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index aa6c53f88f7c..6dcf9ec05eec 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -549,16 +549,15 @@ static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
u32 ch, u32 y, u32 in_h, u32 fmt)
{
struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
- struct drm_format_name_buf format_name;
u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
u32 stride = fb->pitches[0];
u32 addr = (u32)obj->paddr + y * stride;
DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
ch + 1, y, in_h, stride, (u32)obj->paddr);
- DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n",
+ DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%p4cc)\n",
addr, fb->width, fb->height, fmt,
- drm_get_format_name(fb->format->format, &format_name));
+ &fb->format->format);
/* get reg offset */
reg_ctrl = RD_CH_CTRL(ch);
@@ -758,19 +757,21 @@ static void ade_disable_channel(struct kirin_plane *kplane)
}
static int ade_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct drm_framebuffer *fb = state->fb;
- struct drm_crtc *crtc = state->crtc;
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_framebuffer *fb = new_plane_state->fb;
+ struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
- u32 src_x = state->src_x >> 16;
- u32 src_y = state->src_y >> 16;
- u32 src_w = state->src_w >> 16;
- u32 src_h = state->src_h >> 16;
- int crtc_x = state->crtc_x;
- int crtc_y = state->crtc_y;
- u32 crtc_w = state->crtc_w;
- u32 crtc_h = state->crtc_h;
+ u32 src_x = new_plane_state->src_x >> 16;
+ u32 src_y = new_plane_state->src_y >> 16;
+ u32 src_w = new_plane_state->src_w >> 16;
+ u32 src_h = new_plane_state->src_h >> 16;
+ int crtc_x = new_plane_state->crtc_x;
+ int crtc_y = new_plane_state->crtc_y;
+ u32 crtc_w = new_plane_state->crtc_w;
+ u32 crtc_h = new_plane_state->crtc_h;
u32 fmt;
if (!crtc || !fb)
@@ -780,7 +781,7 @@ static int ade_plane_atomic_check(struct drm_plane *plane,
if (fmt == ADE_FORMAT_UNSUPPORT)
return -EINVAL;
- crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
@@ -803,19 +804,21 @@ static int ade_plane_atomic_check(struct drm_plane *plane,
}
static void ade_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct kirin_plane *kplane = to_kirin_plane(plane);
- ade_update_channel(kplane, state->fb, state->crtc_x, state->crtc_y,
- state->crtc_w, state->crtc_h,
- state->src_x >> 16, state->src_y >> 16,
- state->src_w >> 16, state->src_h >> 16);
+ ade_update_channel(kplane, new_state->fb, new_state->crtc_x,
+ new_state->crtc_y,
+ new_state->crtc_w, new_state->crtc_h,
+ new_state->src_x >> 16, new_state->src_y >> 16,
+ new_state->src_w >> 16, new_state->src_h >> 16);
}
static void ade_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct kirin_plane *kplane = to_kirin_plane(plane);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 8d7aaa68c6f6..25fed00b5121 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -10228,7 +10228,6 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_format_name_buf format_name;
if (!fb) {
drm_dbg_kms(&i915->drm,
@@ -10239,10 +10238,9 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
}
drm_dbg_kms(&i915->drm,
- "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n",
+ "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
plane->base.base.id, plane->base.name,
- fb->base.id, fb->width, fb->height,
- drm_get_format_name(fb->format->format, &format_name),
+ fb->base.id, fb->width, fb->height, &fb->format->format,
fb->modifier, yesno(plane_state->uapi.visible));
drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
plane_state->hw.rotation, plane_state->scaler_id);
@@ -14236,13 +14234,9 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (!drm_any_plane_has_format(&dev_priv->drm,
mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
- struct drm_format_name_buf format_name;
-
drm_dbg_kms(&dev_priv->drm,
- "unsupported pixel format %s / modifier 0x%llx\n",
- drm_get_format_name(mode_cmd->pixel_format,
- &format_name),
- mode_cmd->modifier[0]);
+ "unsupported pixel format %p4cc / modifier 0x%llx\n",
+ &mode_cmd->pixel_format, mode_cmd->modifier[0]);
goto err;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index d62b18d5ecd8..0e364dc3e7c3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -772,27 +772,25 @@ static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
const struct drm_framebuffer *fb = plane_state->uapi.fb;
- struct drm_format_name_buf format_name;
struct drm_rect src, dst;
char rot_str[48];
src = drm_plane_state_src(&plane_state->uapi);
dst = drm_plane_state_dest(&plane_state->uapi);
- if (fb)
- drm_get_format_name(fb->format->format, &format_name);
-
plane_rotation(rot_str, sizeof(rot_str),
plane_state->uapi.rotation);
- seq_printf(m, "\t\tuapi: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
- fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
- fb ? fb->modifier : 0,
- fb ? fb->width : 0, fb ? fb->height : 0,
- plane_visibility(plane_state),
- DRM_RECT_FP_ARG(&src),
- DRM_RECT_ARG(&dst),
- rot_str);
+ seq_puts(m, "\t\tuapi: [FB:");
+ if (fb)
+ seq_printf(m, "%d] %p4cc,0x%llx,%dx%d", fb->base.id,
+ &fb->format->format, fb->modifier, fb->width,
+ fb->height);
+ else
+ seq_puts(m, "0] n/a,0x0,0x0,");
+ seq_printf(m, ", visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT
+ ", rotation=%s\n", plane_visibility(plane_state),
+ DRM_RECT_FP_ARG(&src), DRM_RECT_ARG(&dst), rot_str);
if (plane_state->planar_linked_plane)
seq_printf(m, "\t\tplanar: Linked to [PLANE:%d:%s] as a %s\n",
@@ -805,19 +803,17 @@ static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
const struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_format_name_buf format_name;
char rot_str[48];
if (!fb)
return;
- drm_get_format_name(fb->format->format, &format_name);
-
plane_rotation(rot_str, sizeof(rot_str),
plane_state->hw.rotation);
- seq_printf(m, "\t\thw: [FB:%d] %s,0x%llx,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
- fb->base.id, format_name.str,
+ seq_printf(m, "\t\thw: [FB:%d] %p4cc,0x%llx,%dx%d, visible=%s, src="
+ DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb->base.id, &fb->format->format,
fb->modifier, fb->width, fb->height,
yesno(plane_state->uapi.visible),
DRM_RECT_FP_ARG(&plane_state->uapi.src),
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 993543334a1e..72ed728060e3 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -2320,7 +2320,6 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
- struct drm_format_name_buf format_name;
if (!fb)
return 0;
@@ -2368,9 +2367,8 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_XVYU12_16161616:
case DRM_FORMAT_XVYU16161616:
drm_dbg_kms(&dev_priv->drm,
- "Unsupported pixel format %s for 90/270!\n",
- drm_get_format_name(fb->format->format,
- &format_name));
+ "Unsupported pixel format %p4cc for 90/270!\n",
+ &fb->format->format);
return -EINVAL;
default:
break;
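
/*
 * Editor's sketch, not part of the patch: the i915 and kirin hunks above
 * drop drm_get_format_name()/struct drm_format_name_buf in favour of the
 * %p4cc printk specifier, which takes a pointer to the u32 fourcc code.
 * example_dump_format() is a hypothetical helper for illustration only.
 */
#include <drm/drm_device.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>

static void example_dump_format(struct drm_device *dev,
				const struct drm_framebuffer *fb)
{
	/*
	 * Old style, removed by this series:
	 *	struct drm_format_name_buf buf;
	 *	drm_dbg_kms(dev, "format = %s\n",
	 *		    drm_get_format_name(fb->format->format, &buf));
	 */
	drm_dbg_kms(dev, "format = %p4cc\n", &fb->format->format);
}
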
diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c
index 03ba88f7f995..044d3bdf313c 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-plane.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c
@@ -6,7 +6,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "dcss-dev.h"
@@ -137,11 +137,13 @@ static bool dcss_plane_is_source_size_allowed(u16 src_w, u16 src_h, u32 pix_fmt)
}
static int dcss_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *fb = new_plane_state->fb;
bool is_primary_plane = plane->type == DRM_PLANE_TYPE_PRIMARY;
struct drm_gem_cma_object *cma_obj;
struct drm_crtc_state *crtc_state;
@@ -149,20 +151,20 @@ static int dcss_plane_atomic_check(struct drm_plane *plane,
int min, max;
int ret;
- if (!fb || !state->crtc)
+ if (!fb || !new_plane_state->crtc)
return 0;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
WARN_ON(!cma_obj);
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ new_plane_state->crtc);
hdisplay = crtc_state->adjusted_mode.hdisplay;
vdisplay = crtc_state->adjusted_mode.vdisplay;
- if (!dcss_plane_is_source_size_allowed(state->src_w >> 16,
- state->src_h >> 16,
+ if (!dcss_plane_is_source_size_allowed(new_plane_state->src_w >> 16,
+ new_plane_state->src_h >> 16,
fb->format->format)) {
DRM_DEBUG_KMS("Source plane size is not allowed!\n");
return -EINVAL;
@@ -171,26 +173,26 @@ static int dcss_plane_atomic_check(struct drm_plane *plane,
dcss_scaler_get_min_max_ratios(dcss->scaler, dcss_plane->ch_num,
&min, &max);
- ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
min, max, !is_primary_plane,
false);
if (ret)
return ret;
- if (!state->visible)
+ if (!new_plane_state->visible)
return 0;
if (!dcss_plane_can_rotate(fb->format,
!!(fb->flags & DRM_MODE_FB_MODIFIERS),
fb->modifier,
- state->rotation)) {
+ new_plane_state->rotation)) {
DRM_DEBUG_KMS("requested rotation is not allowed!\n");
return -EINVAL;
}
- if ((state->crtc_x < 0 || state->crtc_y < 0 ||
- state->crtc_x + state->crtc_w > hdisplay ||
- state->crtc_y + state->crtc_h > vdisplay) &&
+ if ((new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0 ||
+ new_plane_state->crtc_x + new_plane_state->crtc_w > hdisplay ||
+ new_plane_state->crtc_y + new_plane_state->crtc_h > vdisplay) &&
!dcss_plane_fb_is_linear(fb)) {
DRM_DEBUG_KMS("requested cropping operation is not allowed!\n");
return -EINVAL;
@@ -262,12 +264,15 @@ static bool dcss_plane_needs_setup(struct drm_plane_state *state,
}
static void dcss_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *fb = new_state->fb;
struct drm_crtc_state *crtc_state;
bool modifiers_present;
u32 src_w, src_h, dst_w, dst_h;
@@ -275,14 +280,14 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
bool enable = true;
bool is_rotation_90_or_270;
- if (!fb || !state->crtc || !state->visible)
+ if (!fb || !new_state->crtc || !new_state->visible)
return;
- crtc_state = state->crtc->state;
+ crtc_state = new_state->crtc->state;
modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS);
if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state) &&
- !dcss_plane_needs_setup(state, old_state)) {
+ !dcss_plane_needs_setup(new_state, old_state)) {
dcss_plane_atomic_set_base(dcss_plane);
return;
}
@@ -302,23 +307,24 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
modifiers_present && fb->modifier == DRM_FORMAT_MOD_LINEAR)
modifiers_present = false;
- dcss_dpr_format_set(dcss->dpr, dcss_plane->ch_num, state->fb->format,
+ dcss_dpr_format_set(dcss->dpr, dcss_plane->ch_num,
+ new_state->fb->format,
modifiers_present ? fb->modifier :
DRM_FORMAT_MOD_LINEAR);
dcss_dpr_set_res(dcss->dpr, dcss_plane->ch_num, src_w, src_h);
dcss_dpr_set_rotation(dcss->dpr, dcss_plane->ch_num,
- state->rotation);
+ new_state->rotation);
dcss_plane_atomic_set_base(dcss_plane);
- is_rotation_90_or_270 = state->rotation & (DRM_MODE_ROTATE_90 |
+ is_rotation_90_or_270 = new_state->rotation & (DRM_MODE_ROTATE_90 |
DRM_MODE_ROTATE_270);
dcss_scaler_set_filter(dcss->scaler, dcss_plane->ch_num,
- state->scaling_filter);
+ new_state->scaling_filter);
dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num,
- state->fb->format,
+ new_state->fb->format,
is_rotation_90_or_270 ? src_h : src_w,
is_rotation_90_or_270 ? src_w : src_h,
dst_w, dst_h,
@@ -327,9 +333,9 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
dcss_dtg_plane_pos_set(dcss->dtg, dcss_plane->ch_num,
dst.x1, dst.y1, dst_w, dst_h);
dcss_dtg_plane_alpha_set(dcss->dtg, dcss_plane->ch_num,
- fb->format, state->alpha >> 8);
+ fb->format, new_state->alpha >> 8);
- if (!dcss_plane->ch_num && (state->alpha >> 8) == 0)
+ if (!dcss_plane->ch_num && (new_state->alpha >> 8) == 0)
enable = false;
dcss_dpr_enable(dcss->dpr, dcss_plane->ch_num, enable);
@@ -343,7 +349,7 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
}
static void dcss_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
@@ -355,7 +361,7 @@ static void dcss_plane_atomic_disable(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs dcss_plane_helper_funcs = {
- .prepare_fb = drm_gem_fb_prepare_fb,
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
.atomic_check = dcss_plane_atomic_check,
.atomic_update = dcss_plane_atomic_update,
.atomic_disable = dcss_plane_atomic_disable,
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 075508051b5f..fa5009705365 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -9,8 +9,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_plane_helper.h>
@@ -337,12 +337,15 @@ static const struct drm_plane_funcs ipu_plane_funcs = {
};
static int ipu_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *old_state = plane->state;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state;
struct device *dev = plane->dev->dev;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *fb = new_state->fb;
struct drm_framebuffer *old_fb = old_state->fb;
unsigned long eba, ubo, vbo, old_ubo, old_vbo, alpha_eba;
bool can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
@@ -352,15 +355,16 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
if (!fb)
return 0;
- if (WARN_ON(!state->crtc))
+ if (WARN_ON(!new_state->crtc))
return -EINVAL;
crtc_state =
- drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ drm_atomic_get_existing_crtc_state(state,
+ new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
- ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
can_position, true);
@@ -374,7 +378,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
/* full plane minimum width is 13 pixels */
- if (drm_rect_width(&state->dst) < 13)
+ if (drm_rect_width(&new_state->dst) < 13)
return -EINVAL;
break;
case DRM_PLANE_TYPE_OVERLAY:
@@ -384,7 +388,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- if (drm_rect_height(&state->dst) < 2)
+ if (drm_rect_height(&new_state->dst) < 2)
return -EINVAL;
/*
@@ -395,12 +399,12 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
* callback.
*/
if (old_fb &&
- (drm_rect_width(&state->dst) != drm_rect_width(&old_state->dst) ||
- drm_rect_height(&state->dst) != drm_rect_height(&old_state->dst) ||
+ (drm_rect_width(&new_state->dst) != drm_rect_width(&old_state->dst) ||
+ drm_rect_height(&new_state->dst) != drm_rect_height(&old_state->dst) ||
fb->format != old_fb->format))
crtc_state->mode_changed = true;
- eba = drm_plane_state_to_eba(state, 0);
+ eba = drm_plane_state_to_eba(new_state, 0);
if (eba & 0x7)
return -EINVAL;
@@ -426,7 +430,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
* - Only EBA may be changed while scanout is active
* - The strides of U and V planes must be identical.
*/
- vbo = drm_plane_state_to_vbo(state);
+ vbo = drm_plane_state_to_vbo(new_state);
if (vbo & 0x7 || vbo > 0xfffff8)
return -EINVAL;
@@ -443,7 +447,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
fallthrough;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
- ubo = drm_plane_state_to_ubo(state);
+ ubo = drm_plane_state_to_ubo(new_state);
if (ubo & 0x7 || ubo > 0xfffff8)
return -EINVAL;
@@ -464,8 +468,8 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
* The x/y offsets must be even in case of horizontal/vertical
* chroma subsampling.
*/
- if (((state->src.x1 >> 16) & (fb->format->hsub - 1)) ||
- ((state->src.y1 >> 16) & (fb->format->vsub - 1)))
+ if (((new_state->src.x1 >> 16) & (fb->format->hsub - 1)) ||
+ ((new_state->src.y1 >> 16) & (fb->format->vsub - 1)))
return -EINVAL;
break;
case DRM_FORMAT_RGB565_A8:
@@ -474,7 +478,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
case DRM_FORMAT_BGR888_A8:
case DRM_FORMAT_RGBX8888_A8:
case DRM_FORMAT_BGRX8888_A8:
- alpha_eba = drm_plane_state_to_eba(state, 1);
+ alpha_eba = drm_plane_state_to_eba(new_state, 1);
if (alpha_eba & 0x7)
return -EINVAL;
@@ -490,7 +494,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
}
static void ipu_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
@@ -535,14 +539,17 @@ static void ipu_calculate_bursts(u32 width, u32 cpp, u32 stride,
}
static void ipu_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
- struct drm_plane_state *state = plane->state;
- struct ipu_plane_state *ipu_state = to_ipu_plane_state(state);
- struct drm_crtc_state *crtc_state = state->crtc->state;
- struct drm_framebuffer *fb = state->fb;
- struct drm_rect *dst = &state->dst;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct ipu_plane_state *ipu_state = to_ipu_plane_state(new_state);
+ struct drm_crtc_state *crtc_state = new_state->crtc->state;
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_rect *dst = &new_state->dst;
unsigned long eba, ubo, vbo;
unsigned long alpha_eba = 0;
enum ipu_color_space ics;
@@ -557,7 +564,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
switch (ipu_plane->dp_flow) {
case IPU_DP_FLOW_SYNC_BG:
- if (state->normalized_zpos == 1) {
+ if (new_state->normalized_zpos == 1) {
ipu_dp_set_global_alpha(ipu_plane->dp,
!fb->format->has_alpha, 0xff,
true);
@@ -566,7 +573,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
}
break;
case IPU_DP_FLOW_SYNC_FG:
- if (state->normalized_zpos == 1) {
+ if (new_state->normalized_zpos == 1) {
ipu_dp_set_global_alpha(ipu_plane->dp,
!fb->format->has_alpha, 0xff,
false);
@@ -574,7 +581,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
break;
}
- eba = drm_plane_state_to_eba(state, 0);
+ eba = drm_plane_state_to_eba(new_state, 0);
/*
* Configure PRG channel and attached PRE, this changes the EBA to an
@@ -583,8 +590,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
if (ipu_state->use_pre) {
axi_id = ipu_chan_assign_axi_id(ipu_plane->dma);
ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id,
- drm_rect_width(&state->src) >> 16,
- drm_rect_height(&state->src) >> 16,
+ drm_rect_width(&new_state->src) >> 16,
+ drm_rect_height(&new_state->src) >> 16,
fb->pitches[0], fb->format->format,
fb->modifier, &eba);
}
@@ -618,8 +625,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst));
- width = drm_rect_width(&state->src) >> 16;
- height = drm_rect_height(&state->src) >> 16;
+ width = drm_rect_width(&new_state->src) >> 16;
+ height = drm_rect_height(&new_state->src) >> 16;
info = drm_format_info(fb->format->format);
ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0],
&burstsize, &num_bursts);
@@ -641,8 +648,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV444:
case DRM_FORMAT_YVU444:
- ubo = drm_plane_state_to_ubo(state);
- vbo = drm_plane_state_to_vbo(state);
+ ubo = drm_plane_state_to_ubo(new_state);
+ vbo = drm_plane_state_to_vbo(new_state);
if (fb->format->format == DRM_FORMAT_YVU420 ||
fb->format->format == DRM_FORMAT_YVU422 ||
fb->format->format == DRM_FORMAT_YVU444)
@@ -653,18 +660,18 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
dev_dbg(ipu_plane->base.dev->dev,
"phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
- state->src.x1 >> 16, state->src.y1 >> 16);
+ new_state->src.x1 >> 16, new_state->src.y1 >> 16);
break;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
- ubo = drm_plane_state_to_ubo(state);
+ ubo = drm_plane_state_to_ubo(new_state);
ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
fb->pitches[1], ubo, ubo);
dev_dbg(ipu_plane->base.dev->dev,
"phy = %lu %lu, x = %d, y = %d", eba, ubo,
- state->src.x1 >> 16, state->src.y1 >> 16);
+ new_state->src.x1 >> 16, new_state->src.y1 >> 16);
break;
case DRM_FORMAT_RGB565_A8:
case DRM_FORMAT_BGR565_A8:
@@ -672,18 +679,19 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
case DRM_FORMAT_BGR888_A8:
case DRM_FORMAT_RGBX8888_A8:
case DRM_FORMAT_BGRX8888_A8:
- alpha_eba = drm_plane_state_to_eba(state, 1);
+ alpha_eba = drm_plane_state_to_eba(new_state, 1);
num_bursts = 0;
dev_dbg(ipu_plane->base.dev->dev, "phys = %lu %lu, x = %d, y = %d",
- eba, alpha_eba, state->src.x1 >> 16, state->src.y1 >> 16);
+ eba, alpha_eba, new_state->src.x1 >> 16,
+ new_state->src.y1 >> 16);
ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16);
ipu_cpmem_zero(ipu_plane->alpha_ch);
ipu_cpmem_set_resolution(ipu_plane->alpha_ch,
- drm_rect_width(&state->src) >> 16,
- drm_rect_height(&state->src) >> 16);
+ drm_rect_width(&new_state->src) >> 16,
+ drm_rect_height(&new_state->src) >> 16);
ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8);
ipu_cpmem_set_high_priority(ipu_plane->alpha_ch);
ipu_idmac_set_double_buffer(ipu_plane->alpha_ch, 1);
@@ -694,7 +702,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
break;
default:
dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
- eba, state->src.x1 >> 16, state->src.y1 >> 16);
+ eba, new_state->src.x1 >> 16, new_state->src.y1 >> 16);
break;
}
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
@@ -704,7 +712,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
- .prepare_fb = drm_gem_fb_prepare_fb,
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
.atomic_check = ipu_plane_atomic_check,
.atomic_disable = ipu_plane_atomic_disable,
.atomic_update = ipu_plane_atomic_update,
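
/*
 * Editor's sketch, not part of the patch: alongside the state-pointer
 * conversion, this series moves the GEM prepare_fb helper from
 * <drm/drm_gem_framebuffer_helper.h> (drm_gem_fb_prepare_fb) to
 * <drm/drm_gem_atomic_helper.h> (drm_gem_plane_helper_prepare_fb). The hook
 * still essentially attaches the framebuffer's implicit fence to the plane
 * state. The foo_* name below is hypothetical.
 */
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	/* was: .prepare_fb = drm_gem_fb_prepare_fb, */
	.prepare_fb	= drm_gem_plane_helper_prepare_fb,
	/* the driver's own atomic_check/atomic_update/atomic_disable follow */
};
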
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index 7bb31fbee29d..54ee2cb61f3c 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -28,6 +28,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_managed.h>
@@ -359,21 +360,26 @@ static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc,
}
static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct ingenic_drm *priv = drm_device_get_priv(plane->dev);
struct drm_crtc_state *crtc_state;
- struct drm_crtc *crtc = state->crtc ?: plane->state->crtc;
+ struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
int ret;
if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
- ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
priv->soc_info->has_osd,
@@ -386,9 +392,9 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
* Note that state->src_* are in 16.16 fixed-point format.
*/
if (!priv->soc_info->has_osd &&
- (state->src_x != 0 ||
- (state->src_w >> 16) != state->crtc_w ||
- (state->src_h >> 16) != state->crtc_h))
+ (new_plane_state->src_x != 0 ||
+ (new_plane_state->src_w >> 16) != new_plane_state->crtc_w ||
+ (new_plane_state->src_h >> 16) != new_plane_state->crtc_h))
return -EINVAL;
/*
@@ -396,12 +402,12 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
* its position, size or depth.
*/
if (priv->soc_info->has_osd &&
- (!plane->state->fb || !state->fb ||
- plane->state->crtc_x != state->crtc_x ||
- plane->state->crtc_y != state->crtc_y ||
- plane->state->crtc_w != state->crtc_w ||
- plane->state->crtc_h != state->crtc_h ||
- plane->state->fb->format->format != state->fb->format->format))
+ (!old_plane_state->fb || !new_plane_state->fb ||
+ old_plane_state->crtc_x != new_plane_state->crtc_x ||
+ old_plane_state->crtc_y != new_plane_state->crtc_y ||
+ old_plane_state->crtc_w != new_plane_state->crtc_w ||
+ old_plane_state->crtc_h != new_plane_state->crtc_h ||
+ old_plane_state->fb->format->format != new_plane_state->fb->format->format))
crtc_state->mode_changed = true;
return 0;
@@ -438,7 +444,7 @@ void ingenic_drm_plane_disable(struct device *dev, struct drm_plane *plane)
}
static void ingenic_drm_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_device_get_priv(plane->dev);
@@ -536,23 +542,24 @@ static void ingenic_drm_update_palette(struct ingenic_drm *priv,
}
static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *oldstate)
+ struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_device_get_priv(plane->dev);
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state;
struct ingenic_dma_hwdesc *hwdesc;
unsigned int width, height, cpp, offset;
dma_addr_t addr;
u32 fourcc;
- if (state && state->fb) {
- crtc_state = state->crtc->state;
+ if (newstate && newstate->fb) {
+ crtc_state = newstate->crtc->state;
- addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
- width = state->src_w >> 16;
- height = state->src_h >> 16;
- cpp = state->fb->format->cpp[0];
+ addr = drm_fb_cma_get_gem_addr(newstate->fb, newstate, 0);
+ width = newstate->src_w >> 16;
+ height = newstate->src_h >> 16;
+ cpp = newstate->fb->format->cpp[0];
if (priv->soc_info->has_osd && plane->type == DRM_PLANE_TYPE_OVERLAY)
hwdesc = &priv->dma_hwdescs->hwdesc_f0;
@@ -563,7 +570,7 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
hwdesc->cmd = JZ_LCD_CMD_EOF_IRQ | (width * height * cpp / 4);
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
- fourcc = state->fb->format->format;
+ fourcc = newstate->fb->format->format;
ingenic_drm_plane_config(priv->dev, plane, fourcc);
@@ -780,7 +787,7 @@ static const struct drm_plane_helper_funcs ingenic_drm_plane_helper_funcs = {
.atomic_update = ingenic_drm_plane_atomic_update,
.atomic_check = ingenic_drm_plane_atomic_check,
.atomic_disable = ingenic_drm_plane_atomic_disable,
- .prepare_fb = drm_gem_fb_prepare_fb,
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
};
static const struct drm_crtc_helper_funcs ingenic_drm_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/ingenic/ingenic-ipu.c b/drivers/gpu/drm/ingenic/ingenic-ipu.c
index e52777ef85fd..5ae6adab8306 100644
--- a/drivers/gpu/drm/ingenic/ingenic-ipu.c
+++ b/drivers/gpu/drm/ingenic/ingenic-ipu.c
@@ -23,7 +23,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_property.h>
@@ -282,19 +282,20 @@ static inline bool osd_changed(struct drm_plane_state *state,
}
static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *oldstate)
+ struct drm_atomic_state *state)
{
struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane);
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state,
+ plane);
const struct drm_format_info *finfo;
u32 ctrl, stride = 0, coef_index = 0, format = 0;
bool needs_modeset, upscaling_w, upscaling_h;
int err;
- if (!state || !state->fb)
+ if (!newstate || !newstate->fb)
return;
- finfo = drm_format_info(state->fb->format->format);
+ finfo = drm_format_info(newstate->fb->format->format);
if (!ipu->clk_enabled) {
err = clk_enable(ipu->clk);
@@ -307,7 +308,7 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
}
/* Reset all the registers if needed */
- needs_modeset = drm_atomic_crtc_needs_modeset(state->crtc->state);
+ needs_modeset = drm_atomic_crtc_needs_modeset(newstate->crtc->state);
if (needs_modeset) {
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_RST);
@@ -317,11 +318,13 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
}
/* New addresses will be committed in vblank handler... */
- ipu->addr_y = drm_fb_cma_get_gem_addr(state->fb, state, 0);
+ ipu->addr_y = drm_fb_cma_get_gem_addr(newstate->fb, newstate, 0);
if (finfo->num_planes > 1)
- ipu->addr_u = drm_fb_cma_get_gem_addr(state->fb, state, 1);
+ ipu->addr_u = drm_fb_cma_get_gem_addr(newstate->fb, newstate,
+ 1);
if (finfo->num_planes > 2)
- ipu->addr_v = drm_fb_cma_get_gem_addr(state->fb, state, 2);
+ ipu->addr_v = drm_fb_cma_get_gem_addr(newstate->fb, newstate,
+ 2);
if (!needs_modeset)
return;
@@ -338,21 +341,21 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
/* Set the input height/width/strides */
if (finfo->num_planes > 2)
- stride = ((state->src_w >> 16) * finfo->cpp[2] / finfo->hsub)
+ stride = ((newstate->src_w >> 16) * finfo->cpp[2] / finfo->hsub)
<< JZ_IPU_UV_STRIDE_V_LSB;
if (finfo->num_planes > 1)
- stride |= ((state->src_w >> 16) * finfo->cpp[1] / finfo->hsub)
+ stride |= ((newstate->src_w >> 16) * finfo->cpp[1] / finfo->hsub)
<< JZ_IPU_UV_STRIDE_U_LSB;
regmap_write(ipu->map, JZ_REG_IPU_UV_STRIDE, stride);
- stride = ((state->src_w >> 16) * finfo->cpp[0]) << JZ_IPU_Y_STRIDE_Y_LSB;
+ stride = ((newstate->src_w >> 16) * finfo->cpp[0]) << JZ_IPU_Y_STRIDE_Y_LSB;
regmap_write(ipu->map, JZ_REG_IPU_Y_STRIDE, stride);
regmap_write(ipu->map, JZ_REG_IPU_IN_GS,
(stride << JZ_IPU_IN_GS_W_LSB) |
- ((state->src_h >> 16) << JZ_IPU_IN_GS_H_LSB));
+ ((newstate->src_h >> 16) << JZ_IPU_IN_GS_H_LSB));
switch (finfo->format) {
case DRM_FORMAT_XRGB1555:
@@ -421,9 +424,9 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
/* Set the output height/width/stride */
regmap_write(ipu->map, JZ_REG_IPU_OUT_GS,
- ((state->crtc_w * 4) << JZ_IPU_OUT_GS_W_LSB)
- | state->crtc_h << JZ_IPU_OUT_GS_H_LSB);
- regmap_write(ipu->map, JZ_REG_IPU_OUT_STRIDE, state->crtc_w * 4);
+ ((newstate->crtc_w * 4) << JZ_IPU_OUT_GS_W_LSB)
+ | newstate->crtc_h << JZ_IPU_OUT_GS_H_LSB);
+ regmap_write(ipu->map, JZ_REG_IPU_OUT_STRIDE, newstate->crtc_w * 4);
if (finfo->is_yuv) {
regmap_set_bits(ipu->map, JZ_REG_IPU_CTRL, JZ_IPU_CTRL_CSC_EN);
@@ -508,55 +511,59 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
JZ_IPU_CTRL_RUN | JZ_IPU_CTRL_FM_IRQ_EN);
dev_dbg(ipu->dev, "Scaling %ux%u to %ux%u (%u:%u horiz, %u:%u vert)\n",
- state->src_w >> 16, state->src_h >> 16,
- state->crtc_w, state->crtc_h,
+ newstate->src_w >> 16, newstate->src_h >> 16,
+ newstate->crtc_w, newstate->crtc_h,
ipu->num_w, ipu->denom_w, ipu->num_h, ipu->denom_h);
}
static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
unsigned int num_w, denom_w, num_h, denom_h, xres, yres, max_w, max_h;
struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane);
- struct drm_crtc *crtc = state->crtc ?: plane->state->crtc;
+ struct drm_crtc *crtc = new_plane_state->crtc ?: old_plane_state->crtc;
struct drm_crtc_state *crtc_state;
if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state, crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
/* Request a full modeset if we are enabling or disabling the IPU. */
- if (!plane->state->crtc ^ !state->crtc)
+ if (!old_plane_state->crtc ^ !new_plane_state->crtc)
crtc_state->mode_changed = true;
- if (!state->crtc ||
+ if (!new_plane_state->crtc ||
!crtc_state->mode.hdisplay || !crtc_state->mode.vdisplay)
return 0;
/* Plane must be fully visible */
- if (state->crtc_x < 0 || state->crtc_y < 0 ||
- state->crtc_x + state->crtc_w > crtc_state->mode.hdisplay ||
- state->crtc_y + state->crtc_h > crtc_state->mode.vdisplay)
+ if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0 ||
+ new_plane_state->crtc_x + new_plane_state->crtc_w > crtc_state->mode.hdisplay ||
+ new_plane_state->crtc_y + new_plane_state->crtc_h > crtc_state->mode.vdisplay)
return -EINVAL;
/* Minimum size is 4x4 */
- if ((state->src_w >> 16) < 4 || (state->src_h >> 16) < 4)
+ if ((new_plane_state->src_w >> 16) < 4 || (new_plane_state->src_h >> 16) < 4)
return -EINVAL;
/* Input and output lines must have an even number of pixels. */
- if (((state->src_w >> 16) & 1) || (state->crtc_w & 1))
+ if (((new_plane_state->src_w >> 16) & 1) || (new_plane_state->crtc_w & 1))
return -EINVAL;
- if (!osd_changed(state, plane->state))
+ if (!osd_changed(new_plane_state, old_plane_state))
return 0;
crtc_state->mode_changed = true;
- xres = state->src_w >> 16;
- yres = state->src_h >> 16;
+ xres = new_plane_state->src_w >> 16;
+ yres = new_plane_state->src_h >> 16;
/*
* Increase the scaled image's theorical width/height until we find a
@@ -568,13 +575,13 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
max_w = crtc_state->mode.hdisplay * 102 / 100;
max_h = crtc_state->mode.vdisplay * 102 / 100;
- for (denom_w = xres, num_w = state->crtc_w; num_w <= max_w; num_w++)
+ for (denom_w = xres, num_w = new_plane_state->crtc_w; num_w <= max_w; num_w++)
if (!reduce_fraction(&num_w, &denom_w))
break;
if (num_w > max_w)
return -EINVAL;
- for (denom_h = yres, num_h = state->crtc_h; num_h <= max_h; num_h++)
+ for (denom_h = yres, num_h = new_plane_state->crtc_h; num_h <= max_h; num_h++)
if (!reduce_fraction(&num_h, &denom_h))
break;
if (num_h > max_h)
@@ -589,7 +596,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
}
static void ingenic_ipu_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct ingenic_ipu *ipu = plane_to_ingenic_ipu(plane);
@@ -608,7 +615,7 @@ static const struct drm_plane_helper_funcs ingenic_ipu_plane_helper_funcs = {
.atomic_update = ingenic_ipu_plane_atomic_update,
.atomic_check = ingenic_ipu_plane_atomic_check,
.atomic_disable = ingenic_ipu_plane_atomic_disable,
- .prepare_fb = drm_gem_fb_prepare_fb,
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
};
static int
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index be8eea3830c1..d5b6195856d1 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -77,36 +77,40 @@ static unsigned int check_pixel_format(struct drm_plane *plane, u32 format)
}
static int kmb_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_framebuffer *fb;
int ret;
struct drm_crtc_state *crtc_state;
bool can_position;
- fb = state->fb;
- if (!fb || !state->crtc)
+ fb = new_plane_state->fb;
+ if (!fb || !new_plane_state->crtc)
return 0;
ret = check_pixel_format(plane, fb->format->format);
if (ret)
return ret;
- if (state->crtc_w > KMB_MAX_WIDTH || state->crtc_h > KMB_MAX_HEIGHT)
+ if (new_plane_state->crtc_w > KMB_MAX_WIDTH || new_plane_state->crtc_h > KMB_MAX_HEIGHT)
return -EINVAL;
- if (state->crtc_w < KMB_MIN_WIDTH || state->crtc_h < KMB_MIN_HEIGHT)
+ if (new_plane_state->crtc_w < KMB_MIN_WIDTH || new_plane_state->crtc_h < KMB_MIN_HEIGHT)
return -EINVAL;
can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
crtc_state =
- drm_atomic_get_existing_crtc_state(state->state, state->crtc);
- return drm_atomic_helper_check_plane_state(state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- can_position, true);
+ drm_atomic_get_existing_crtc_state(state,
+ new_plane_state->crtc);
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ can_position, true);
}
static void kmb_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
struct kmb_plane *kmb_plane = to_kmb_plane(plane);
int plane_id = kmb_plane->id;
@@ -274,8 +278,12 @@ static void config_csc(struct kmb_drm_private *kmb, int plane_id)
}
static void kmb_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_framebuffer *fb;
struct kmb_drm_private *kmb;
unsigned int width;
@@ -289,10 +297,10 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
int num_planes;
static dma_addr_t addr[MAX_SUB_PLANES];
- if (!plane || !plane->state || !state)
+ if (!plane || !new_plane_state || !old_plane_state)
return;
- fb = plane->state->fb;
+ fb = new_plane_state->fb;
if (!fb)
return;
num_planes = fb->format->num_planes;
@@ -309,10 +317,10 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
}
spin_unlock_irq(&kmb->irq_lock);
- src_w = (plane->state->src_w >> 16);
- src_h = plane->state->src_h >> 16;
- crtc_x = plane->state->crtc_x;
- crtc_y = plane->state->crtc_y;
+ src_w = (new_plane_state->src_w >> 16);
+ src_h = new_plane_state->src_h >> 16;
+ crtc_x = new_plane_state->crtc_x;
+ crtc_y = new_plane_state->crtc_y;
drm_dbg(&kmb->drm,
"src_w=%d src_h=%d, fb->format->format=0x%x fb->flags=0x%x\n",
@@ -329,7 +337,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
kmb_write_lcd(kmb, LCD_LAYERn_DMA_LINE_WIDTH(plane_id),
(width * fb->format->cpp[0]));
- addr[Y_PLANE] = drm_fb_cma_get_gem_addr(fb, plane->state, 0);
+ addr[Y_PLANE] = drm_fb_cma_get_gem_addr(fb, new_plane_state, 0);
kmb_write_lcd(kmb, LCD_LAYERn_DMA_START_ADDR(plane_id),
addr[Y_PLANE] + fb->offsets[0]);
val = get_pixel_format(fb->format->format);
@@ -341,7 +349,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
kmb_write_lcd(kmb, LCD_LAYERn_DMA_CB_LINE_WIDTH(plane_id),
(width * fb->format->cpp[0]));
- addr[U_PLANE] = drm_fb_cma_get_gem_addr(fb, plane->state,
+ addr[U_PLANE] = drm_fb_cma_get_gem_addr(fb, new_plane_state,
U_PLANE);
/* check if Cb/Cr is swapped*/
if (num_planes == 3 && (val & LCD_LAYER_CRCB_ORDER))
@@ -363,7 +371,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
((width) * fb->format->cpp[0]));
addr[V_PLANE] = drm_fb_cma_get_gem_addr(fb,
- plane->state,
+ new_plane_state,
V_PLANE);
/* check if Cb/Cr is swapped*/
diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c
index 5686ad4aaf7c..4f64940b9055 100644
--- a/drivers/gpu/drm/lima/lima_devfreq.c
+++ b/drivers/gpu/drm/lima/lima_devfreq.c
@@ -81,6 +81,7 @@ static int lima_devfreq_get_dev_status(struct device *dev,
}
static struct devfreq_dev_profile lima_devfreq_profile = {
+ .timer = DEVFREQ_TIMER_DELAYED,
.polling_ms = 50, /* ~3 frames */
.target = lima_devfreq_target,
.get_dev_status = lima_devfreq_get_dev_status,
@@ -163,8 +164,16 @@ int lima_devfreq_init(struct lima_device *ldev)
lima_devfreq_profile.initial_freq = cur_freq;
dev_pm_opp_put(opp);
+ /*
+ * Setup default thresholds for the simple_ondemand governor.
+ * The values are chosen based on experiments.
+ */
+ ldevfreq->gov_data.upthreshold = 30;
+ ldevfreq->gov_data.downdifferential = 5;
+
devfreq = devm_devfreq_add_device(dev, &lima_devfreq_profile,
- DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &ldevfreq->gov_data);
if (IS_ERR(devfreq)) {
dev_err(dev, "Couldn't initialize GPU devfreq\n");
ret = PTR_ERR(devfreq);
diff --git a/drivers/gpu/drm/lima/lima_devfreq.h b/drivers/gpu/drm/lima/lima_devfreq.h
index 2d9b3008ce77..b0c7c736e81a 100644
--- a/drivers/gpu/drm/lima/lima_devfreq.h
+++ b/drivers/gpu/drm/lima/lima_devfreq.h
@@ -4,6 +4,7 @@
#ifndef __LIMA_DEVFREQ_H__
#define __LIMA_DEVFREQ_H__
+#include <linux/devfreq.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
@@ -18,6 +19,7 @@ struct lima_devfreq {
struct opp_table *clkname_opp_table;
struct opp_table *regulators_opp_table;
struct thermal_cooling_device *cooling;
+ struct devfreq_simple_ondemand_data gov_data;
ktime_t busy_time;
ktime_t idle_time;
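
/*
 * Editor's sketch, not part of the patch: the lima hunks above switch the
 * devfreq device to the delayed timer and hand simple_ondemand tuning data
 * (30% up-threshold, 5% down-differential) to devm_devfreq_add_device().
 * Below is a minimal, hypothetical registration showing where that data
 * plugs in; my_profile and my_devfreq_register() are placeholders.
 */
#include <linux/devfreq.h>
#include <linux/err.h>

static struct devfreq_simple_ondemand_data my_gov_data = {
	.upthreshold		= 30,	/* busy % above which to clock up */
	.downdifferential	= 5,	/* hysteresis before clocking down */
};

static int my_devfreq_register(struct device *dev,
			       struct devfreq_dev_profile *my_profile)
{
	struct devfreq *df;

	df = devm_devfreq_add_device(dev, my_profile,
				     DEVFREQ_GOV_SIMPLE_ONDEMAND,
				     &my_gov_data);
	return PTR_ERR_OR_ZERO(df);
}
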
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 5cc20b403a25..ecf3267334ff 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -415,7 +415,7 @@ out:
mutex_unlock(&dev->error_task_list_lock);
}
-static void lima_sched_timedout_job(struct drm_sched_job *job)
+static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job)
{
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
struct lima_sched_task *task = to_lima_task(job);
@@ -449,6 +449,8 @@ static void lima_sched_timedout_job(struct drm_sched_job *job)
drm_sched_resubmit_jobs(&pipe->base);
drm_sched_start(&pipe->base, true);
+
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
static void lima_sched_free_job(struct drm_sched_job *job)
@@ -507,7 +509,7 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
return drm_sched_init(&pipe->base, &lima_sched_ops, 1,
lima_job_hang_limit, msecs_to_jiffies(timeout),
- name);
+ NULL, name);
}
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
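
/*
 * Editor's sketch, not part of the patch: the lima_sched hunk above follows
 * a GPU-scheduler API change where the ->timedout_job() hook now returns an
 * enum drm_gpu_sched_stat instead of void (the extra NULL passed to
 * drm_sched_init() is a separate, newly added optional argument that lima
 * leaves unused). A hypothetical minimal timeout handler:
 */
#include <drm/gpu_scheduler.h>

static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *job)
{
	struct drm_gpu_scheduler *sched = job->sched;

	drm_sched_stop(sched, job);		/* park the scheduler */
	/* ...driver-specific hardware reset would go here... */
	drm_sched_resubmit_jobs(sched);		/* requeue pending jobs */
	drm_sched_start(sched, true);		/* resume with full recovery */

	return DRM_GPU_SCHED_STAT_NOMINAL;	/* the device is still usable */
}
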
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index 7c2e0b865441..4ddc55d58f38 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -13,8 +13,8 @@
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_bridge.h>
@@ -1161,7 +1161,6 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
int dsi_pkt_size;
int fifo_wtrmrk;
int cpp = fb->format->cpp[0];
- struct drm_format_name_buf tmp;
u32 dsi_formatter_frame;
u32 val;
int ret;
@@ -1173,9 +1172,8 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
return;
}
- dev_info(drm->dev, "enable MCDE, %d x %d format %s\n",
- mode->hdisplay, mode->vdisplay,
- drm_get_format_name(format, &tmp));
+ dev_info(drm->dev, "enable MCDE, %d x %d format %p4cc\n",
+ mode->hdisplay, mode->vdisplay, &format);
/* Clear any pending interrupts */
@@ -1481,7 +1479,7 @@ static struct drm_simple_display_pipe_funcs mcde_display_funcs = {
.update = mcde_display_update,
.enable_vblank = mcde_display_enable_vblank,
.disable_vblank = mcde_display_disable_vblank,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
};
int mcde_display_init(struct drm_device *drm)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 8b0de90156c6..54ab3a324752 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -522,7 +522,7 @@ int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
}
void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
- struct drm_plane_state *new_state)
+ struct drm_atomic_state *state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
const struct drm_plane_helper_funcs *plane_helper_funcs =
@@ -531,7 +531,7 @@ void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
if (!mtk_crtc->enabled)
return;
- plane_helper_funcs->atomic_update(plane, new_state);
+ plane_helper_funcs->atomic_update(plane, state);
mtk_drm_crtc_hw_config(mtk_crtc);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index 45cfd0a032de..cb9a36c48d4f 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -21,6 +21,6 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
struct mtk_plane_state *state);
void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
- struct drm_plane_state *plane_state);
+ struct drm_atomic_state *plane_state);
#endif /* MTK_DRM_CRTC_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 92141a19681b..b5582dcf564c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -6,10 +6,10 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fourcc.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp_comp.h"
@@ -77,12 +77,14 @@ static void mtk_drm_plane_destroy_state(struct drm_plane *plane,
}
static int mtk_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state;
int ret;
- if (plane != state->crtc->cursor)
+ if (plane != new_plane_state->crtc->cursor)
return -EINVAL;
if (!plane->state)
@@ -91,16 +93,16 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
if (!plane->state->fb)
return -EINVAL;
- ret = mtk_drm_crtc_plane_check(state->crtc, plane,
- to_mtk_plane_state(state));
+ ret = mtk_drm_crtc_plane_check(new_plane_state->crtc, plane,
+ to_mtk_plane_state(new_plane_state));
if (ret)
return ret;
- if (state->state)
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
+ if (state)
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ new_plane_state->crtc);
else /* Special case for asynchronous cursor updates. */
- crtc_state = state->crtc->state;
+ crtc_state = new_plane_state->crtc->state;
return drm_atomic_helper_check_plane_state(plane->state, crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
@@ -109,9 +111,11 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
}
static void mtk_plane_atomic_async_update(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+ struct drm_atomic_state *state)
{
- struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct mtk_plane_state *new_plane_state = to_mtk_plane_state(plane->state);
plane->state->crtc_x = new_state->crtc_x;
plane->state->crtc_y = new_state->crtc_y;
@@ -122,9 +126,9 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
plane->state->src_h = new_state->src_h;
plane->state->src_w = new_state->src_w;
swap(plane->state->fb, new_state->fb);
- state->pending.async_dirty = true;
+ new_plane_state->pending.async_dirty = true;
- mtk_drm_crtc_async_update(new_state->crtc, plane, new_state);
+ mtk_drm_crtc_async_update(new_state->crtc, plane, state);
}
static const struct drm_plane_funcs mtk_plane_funcs = {
@@ -137,49 +141,56 @@ static const struct drm_plane_funcs mtk_plane_funcs = {
};
static int mtk_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct drm_framebuffer *fb = state->fb;
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc_state *crtc_state;
int ret;
if (!fb)
return 0;
- if (WARN_ON(!state->crtc))
+ if (WARN_ON(!new_plane_state->crtc))
return 0;
- ret = mtk_drm_crtc_plane_check(state->crtc, plane,
- to_mtk_plane_state(state));
+ ret = mtk_drm_crtc_plane_check(new_plane_state->crtc, plane,
+ to_mtk_plane_state(new_plane_state));
if (ret)
return ret;
- crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ crtc_state = drm_atomic_get_crtc_state(state,
+ new_plane_state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- return drm_atomic_helper_check_plane_state(state, crtc_state,
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true);
}
static void mtk_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
-
- state->pending.enable = false;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
+ mtk_plane_state->pending.enable = false;
wmb(); /* Make sure the above parameter is set before update */
- state->pending.dirty = true;
+ mtk_plane_state->pending.dirty = true;
}
static void mtk_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
- struct drm_crtc *crtc = plane->state->crtc;
- struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
+ struct drm_crtc *crtc = new_state->crtc;
+ struct drm_framebuffer *fb = new_state->fb;
struct drm_gem_object *gem;
struct mtk_drm_gem_obj *mtk_gem;
unsigned int pitch, format;
@@ -188,8 +199,8 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
if (!crtc || WARN_ON(!fb))
return;
- if (!plane->state->visible) {
- mtk_plane_atomic_disable(plane, old_state);
+ if (!new_state->visible) {
+ mtk_plane_atomic_disable(plane, state);
return;
}
@@ -199,24 +210,24 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
pitch = fb->pitches[0];
format = fb->format->format;
- addr += (plane->state->src.x1 >> 16) * fb->format->cpp[0];
- addr += (plane->state->src.y1 >> 16) * pitch;
-
- state->pending.enable = true;
- state->pending.pitch = pitch;
- state->pending.format = format;
- state->pending.addr = addr;
- state->pending.x = plane->state->dst.x1;
- state->pending.y = plane->state->dst.y1;
- state->pending.width = drm_rect_width(&plane->state->dst);
- state->pending.height = drm_rect_height(&plane->state->dst);
- state->pending.rotation = plane->state->rotation;
+ addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
+ addr += (new_state->src.y1 >> 16) * pitch;
+
+ mtk_plane_state->pending.enable = true;
+ mtk_plane_state->pending.pitch = pitch;
+ mtk_plane_state->pending.format = format;
+ mtk_plane_state->pending.addr = addr;
+ mtk_plane_state->pending.x = new_state->dst.x1;
+ mtk_plane_state->pending.y = new_state->dst.y1;
+ mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
+ mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
+ mtk_plane_state->pending.rotation = new_state->rotation;
wmb(); /* Make sure the above parameters are set before update */
- state->pending.dirty = true;
+ mtk_plane_state->pending.dirty = true;
}
static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
- .prepare_fb = drm_gem_fb_prepare_fb,
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
.atomic_check = mtk_plane_atomic_check,
.atomic_update = mtk_plane_atomic_update,
.atomic_disable = mtk_plane_atomic_disable,
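
/*
 * Editor's sketch, not part of the patch: the update/disable side of the
 * same conversion. Instead of an old-state argument plus plane->state, the
 * helper now pulls both the old and the new plane state out of the atomic
 * state. foo_plane_atomic_update() is hypothetical.
 */
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
#include <drm/drm_plane.h>
#include <drm/drm_print.h>

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_atomic_state *state)
{
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	if (!new_state->fb || !new_state->visible)
		return;

	/* a framebuffer change typically requires full reprogramming */
	if (old_state->fb != new_state->fb)
		drm_dbg_atomic(plane->dev, "fb changed\n");

	/* program the hardware from new_state */
}
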
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
index 1ffbbecafa22..ed063152aecd 100644
--- a/drivers/gpu/drm/meson/meson_overlay.c
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -10,11 +10,11 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_device.h>
+#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_plane_helper.h>
#include "meson_overlay.h"
#include "meson_registers.h"
@@ -165,18 +165,22 @@ struct meson_overlay {
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
static int meson_overlay_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state;
- if (!state->crtc)
+ if (!new_plane_state->crtc)
return 0;
- crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ crtc_state = drm_atomic_get_crtc_state(state,
+ new_plane_state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- return drm_atomic_helper_check_plane_state(state, crtc_state,
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
FRAC_16_16(1, 5),
FRAC_16_16(5, 1),
true, true);
@@ -464,11 +468,12 @@ static void meson_overlay_setup_scaler_params(struct meson_drm *priv,
}
static void meson_overlay_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct meson_overlay *meson_overlay = to_meson_overlay(plane);
- struct drm_plane_state *state = plane->state;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_framebuffer *fb = new_state->fb;
struct meson_drm *priv = meson_overlay->priv;
struct drm_gem_cma_object *gem;
unsigned long flags;
@@ -476,7 +481,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
DRM_DEBUG_DRIVER("\n");
- interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
+ interlace_mode = new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
spin_lock_irqsave(&priv->drm->event_lock, flags);
@@ -717,7 +722,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
}
static void meson_overlay_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct meson_overlay *meson_overlay = to_meson_overlay(plane);
struct meson_drm *priv = meson_overlay->priv;
@@ -742,7 +747,7 @@ static const struct drm_plane_helper_funcs meson_overlay_helper_funcs = {
.atomic_check = meson_overlay_atomic_check,
.atomic_disable = meson_overlay_atomic_disable,
.atomic_update = meson_overlay_atomic_update,
- .prepare_fb = drm_gem_fb_prepare_fb,
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
};
static bool meson_overlay_format_mod_supported(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 35338ed18209..a18510dae4c8 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -16,8 +16,8 @@
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include "meson_plane.h"
@@ -71,14 +71,17 @@ struct meson_plane {
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
static int meson_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state;
- if (!state->crtc)
+ if (!new_plane_state->crtc)
return 0;
- crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ crtc_state = drm_atomic_get_crtc_state(state,
+ new_plane_state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
@@ -87,7 +90,8 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
* - Upscaling up to 5x, vertical and horizontal
* - Final coordinates must match crtc size
*/
- return drm_atomic_helper_check_plane_state(state, crtc_state,
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
FRAC_16_16(1, 5),
DRM_PLANE_HELPER_NO_SCALING,
false, true);
@@ -126,13 +130,14 @@ static u32 meson_g12a_afbcd_line_stride(struct meson_drm *priv)
}
static void meson_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct meson_plane *meson_plane = to_meson_plane(plane);
- struct drm_plane_state *state = plane->state;
- struct drm_rect dest = drm_plane_state_dest(state);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_rect dest = drm_plane_state_dest(new_state);
struct meson_drm *priv = meson_plane->priv;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *fb = new_state->fb;
struct drm_gem_cma_object *gem;
unsigned long flags;
int vsc_ini_rcv_num, vsc_ini_rpt_p0_num;
@@ -245,7 +250,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
hf_bank_len = 4;
vf_bank_len = 4;
- if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
vsc_bot_rcv_num = 6;
vsc_bot_rpt_p0_num = 2;
}
@@ -255,10 +260,10 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
hsc_ini_rpt_p0_num = (hf_bank_len / 2) - 1;
vsc_ini_rpt_p0_num = (vf_bank_len / 2) - 1;
- src_w = fixed16_to_int(state->src_w);
- src_h = fixed16_to_int(state->src_h);
- dst_w = state->crtc_w;
- dst_h = state->crtc_h;
+ src_w = fixed16_to_int(new_state->src_w);
+ src_h = fixed16_to_int(new_state->src_h);
+ dst_w = new_state->crtc_w;
+ dst_h = new_state->crtc_h;
/*
* When the output is interlaced, the OSD must switch between
@@ -267,7 +272,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
* But the vertical scaler can provide such functionality if it
* is configured for 2:1 scaling with interlace options enabled.
*/
- if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
dest.y1 /= 2;
dest.y2 /= 2;
dst_h /= 2;
@@ -276,7 +281,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
hf_phase_step = ((src_w << 18) / dst_w) << 6;
vf_phase_step = (src_h << 20) / dst_h;
- if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
bot_ini_phase = ((vf_phase_step / 2) >> 4);
else
bot_ini_phase = 0;
@@ -308,7 +313,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
VSC_TOP_RPT_L0_NUM(vsc_ini_rpt_p0_num) |
VSC_VERTICAL_SCALER_EN;
- if (state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ if (new_state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
priv->viu.osd_sc_v_ctrl0 |=
VSC_BOT_INI_RCV_NUM(vsc_bot_rcv_num) |
VSC_BOT_RPT_L0_NUM(vsc_bot_rpt_p0_num) |
@@ -343,11 +348,11 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
* e.g. +30x1920 would be (1919 << 16) | 30
*/
priv->viu.osd1_blk0_cfg[1] =
- ((fixed16_to_int(state->src.x2) - 1) << 16) |
- fixed16_to_int(state->src.x1);
+ ((fixed16_to_int(new_state->src.x2) - 1) << 16) |
+ fixed16_to_int(new_state->src.x1);
priv->viu.osd1_blk0_cfg[2] =
- ((fixed16_to_int(state->src.y2) - 1) << 16) |
- fixed16_to_int(state->src.y1);
+ ((fixed16_to_int(new_state->src.y2) - 1) << 16) |
+ fixed16_to_int(new_state->src.y1);
priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;
@@ -391,7 +396,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
}
static void meson_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct meson_plane *meson_plane = to_meson_plane(plane);
struct meson_drm *priv = meson_plane->priv;
@@ -417,7 +422,7 @@ static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {
.atomic_check = meson_plane_atomic_check,
.atomic_disable = meson_plane_atomic_disable,
.atomic_update = meson_plane_atomic_update,
- .prepare_fb = drm_gem_fb_prepare_fb,
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
};
static bool meson_plane_format_mod_supported(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 1dfc42170059..cece3e57fb27 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -17,6 +17,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_print.h>
@@ -706,13 +707,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
{
+ static const unsigned int m_div_val[] = { 1, 2, 4, 8 };
unsigned int vcomax, vcomin, pllreffreq;
unsigned int delta, tmpdelta;
int testr, testn, testm, testo;
unsigned int p, m, n;
unsigned int computed, vco;
int tmp;
- const unsigned int m_div_val[] = { 1, 2, 4, 8 };
m = n = p = 0;
vcomax = 1488000;
@@ -1549,22 +1550,12 @@ mgag200_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
static void
mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb,
- struct drm_rect *clip)
+ struct drm_rect *clip, const struct dma_buf_map *map)
{
- struct drm_device *dev = &mdev->base;
- struct dma_buf_map map;
- void *vmap;
- int ret;
-
- ret = drm_gem_shmem_vmap(fb->obj[0], &map);
- if (drm_WARN_ON(dev, ret))
- return; /* BUG: SHMEM BO should always be vmapped */
- vmap = map.vaddr; /* TODO: Use mapping abstraction properly */
+ void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
drm_fb_memcpy_dstclip(mdev->vram, vmap, fb, clip);
- drm_gem_shmem_vunmap(fb->obj[0], &map);
-
/* Always scanout image at VRAM offset 0 */
mgag200_set_startadd(mdev, (u32)0);
mgag200_set_offset(mdev, fb);
@@ -1580,6 +1571,7 @@ mgag200_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
struct mga_device *mdev = to_mga_device(dev);
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_rect fullscreen = {
.x1 = 0,
.x2 = fb->width,
@@ -1608,7 +1600,7 @@ mgag200_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
mga_crtc_load_lut(crtc);
mgag200_enable_display(mdev);
- mgag200_handle_damage(mdev, fb, &fullscreen);
+ mgag200_handle_damage(mdev, fb, &fullscreen, &shadow_plane_state->map[0]);
}
static void
@@ -1649,6 +1641,7 @@ mgag200_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_device *dev = plane->dev;
struct mga_device *mdev = to_mga_device(dev);
struct drm_plane_state *state = plane->state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_rect damage;
@@ -1656,7 +1649,7 @@ mgag200_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
return;
if (drm_atomic_helper_damage_merged(old_state, state, &damage))
- mgag200_handle_damage(mdev, fb, &damage);
+ mgag200_handle_damage(mdev, fb, &damage, &shadow_plane_state->map[0]);
}
static const struct drm_simple_display_pipe_funcs
@@ -1666,7 +1659,7 @@ mgag200_simple_display_pipe_funcs = {
.disable = mgag200_simple_display_pipe_disable,
.check = mgag200_simple_display_pipe_check,
.update = mgag200_simple_display_pipe_update,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};
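With DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS the shadow-plane helpers vmap the framebuffer BO for as long as the plane state is in use, so the damage handler reads the mapping from the shadow-plane state instead of vmapping/vunmapping the object itself, as mgag200 now does. A rough sketch of the consuming side, with foo_* as placeholder names and the actual memcpy into video memory elided:

#include <drm/drm_damage_helper.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_simple_kms_helper.h>

static void foo_pipe_update(struct drm_simple_display_pipe *pipe,
			    struct drm_plane_state *old_state)
{
	struct drm_plane_state *state = pipe->plane.state;
	struct drm_shadow_plane_state *shadow_plane_state =
		to_drm_shadow_plane_state(state);
	struct drm_rect damage;

	if (!state->fb)
		return;

	if (drm_atomic_helper_damage_merged(old_state, state, &damage)) {
		/* map[0] was vmapped by the shadow-plane prepare_fb helper. */
		void *vaddr = shadow_plane_state->map[0].vaddr;

		/* ...copy the damaged rectangle from vaddr into video memory... */
	}
}

static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
	.update = foo_pipe_update,
	DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};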
static const uint32_t mgag200_simple_display_pipe_formats[] = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 56eb22554197..9607a7644d4b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -71,7 +71,6 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
{
struct dpu_hw_mixer *lm = mixer->hw_lm;
uint32_t blend_op;
- struct drm_format_name_buf format_name;
/* default to opaque blending */
blend_op = DPU_BLEND_FG_ALPHA_FG_CONST |
@@ -87,9 +86,8 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
lm->ops.setup_blend_config(lm, pstate->stage,
0xFF, 0, blend_op);
- DPU_DEBUG("format:%s, alpha_en:%u blend_op:0x%x\n",
- drm_get_format_name(format->base.pixel_format, &format_name),
- format->alpha_enable, blend_op);
+ DPU_DEBUG("format:%p4cc, alpha_en:%u blend_op:0x%x\n",
+ &format->base.pixel_format, format->alpha_enable, blend_op);
}
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
@@ -576,7 +574,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
* of those planes explicitly here prior to plane flush.
*/
drm_atomic_crtc_for_each_plane(plane, crtc)
- dpu_plane_restore(plane);
+ dpu_plane_restore(plane, state);
/* update performance setting before crtc kickoff */
dpu_core_perf_crtc_update(crtc, 1, false);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index f898a8f67b7f..df7f3d3afd8b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -10,10 +10,11 @@
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_file.h>
-#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include "msm_drv.h"
#include "dpu_kms.h"
@@ -892,7 +893,7 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
* we can use msm_atomic_prepare_fb() instead of doing the
* implicit fence and fb prepare by hand here.
*/
- drm_gem_fb_prepare_fb(plane, new_state);
+ drm_gem_plane_helper_prepare_fb(plane, new_state);
if (pstate->aspace) {
ret = msm_framebuffer_prepare(new_state->fb,
@@ -950,44 +951,47 @@ static bool dpu_plane_validate_src(struct drm_rect *src,
}
static int dpu_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
int ret = 0, min_scale;
struct dpu_plane *pdpu = to_dpu_plane(plane);
- struct dpu_plane_state *pstate = to_dpu_plane_state(state);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
const struct drm_crtc_state *crtc_state = NULL;
const struct dpu_format *fmt;
struct drm_rect src, dst, fb_rect = { 0 };
uint32_t min_src_size, max_linewidth;
- if (state->crtc)
- crtc_state = drm_atomic_get_new_crtc_state(state->state,
- state->crtc);
+ if (new_plane_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_plane_state->crtc);
min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxupscale);
- ret = drm_atomic_helper_check_plane_state(state, crtc_state, min_scale,
- pdpu->pipe_sblk->maxdwnscale << 16,
- true, true);
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ min_scale,
+ pdpu->pipe_sblk->maxdwnscale << 16,
+ true, true);
if (ret) {
DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
return ret;
}
- if (!state->visible)
+ if (!new_plane_state->visible)
return 0;
- src.x1 = state->src_x >> 16;
- src.y1 = state->src_y >> 16;
- src.x2 = src.x1 + (state->src_w >> 16);
- src.y2 = src.y1 + (state->src_h >> 16);
+ src.x1 = new_plane_state->src_x >> 16;
+ src.y1 = new_plane_state->src_y >> 16;
+ src.x2 = src.x1 + (new_plane_state->src_w >> 16);
+ src.y2 = src.y1 + (new_plane_state->src_h >> 16);
- dst = drm_plane_state_dest(state);
+ dst = drm_plane_state_dest(new_plane_state);
- fb_rect.x2 = state->fb->width;
- fb_rect.y2 = state->fb->height;
+ fb_rect.x2 = new_plane_state->fb->width;
+ fb_rect.y2 = new_plane_state->fb->height;
max_linewidth = pdpu->catalog->caps->max_linewidth;
- fmt = to_dpu_format(msm_framebuffer_format(state->fb));
+ fmt = to_dpu_format(msm_framebuffer_format(new_plane_state->fb));
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
@@ -1237,23 +1241,24 @@ static void _dpu_plane_atomic_disable(struct drm_plane *plane)
}
static void dpu_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
pdpu->is_error = false;
DPU_DEBUG_PLANE(pdpu, "\n");
- if (!state->visible) {
+ if (!new_state->visible) {
_dpu_plane_atomic_disable(plane);
} else {
dpu_plane_sspp_atomic_update(plane);
}
}
-void dpu_plane_restore(struct drm_plane *plane)
+void dpu_plane_restore(struct drm_plane *plane, struct drm_atomic_state *state)
{
struct dpu_plane *pdpu;
@@ -1266,8 +1271,7 @@ void dpu_plane_restore(struct drm_plane *plane)
DPU_DEBUG_PLANE(pdpu, "\n");
- /* last plane state is same as current state */
- dpu_plane_atomic_update(plane, plane->state);
+ dpu_plane_atomic_update(plane, state);
}
static void dpu_plane_destroy(struct drm_plane *plane)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index 13a983fa8213..03b6365a750c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -88,7 +88,7 @@ void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
* dpu_plane_restore - restore hw state if previously power collapsed
* @plane: Pointer to drm plane structure
*/
-void dpu_plane_restore(struct drm_plane *plane);
+void dpu_plane_restore(struct drm_plane *plane, struct drm_atomic_state *state);
/**
* dpu_plane_flush - final plane operations before commit flush
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index da3cc1d8c331..9aecca919f24 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -4,6 +4,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
@@ -106,23 +107,24 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
static int mdp4_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
return 0;
}
static void mdp4_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
int ret;
ret = mdp4_plane_mode_set(plane,
- state->crtc, state->fb,
- state->crtc_x, state->crtc_y,
- state->crtc_w, state->crtc_h,
- state->src_x, state->src_y,
- state->src_w, state->src_h);
+ new_state->crtc, new_state->fb,
+ new_state->crtc_x, new_state->crtc_y,
+ new_state->crtc_w, new_state->crtc_h,
+ new_state->src_x, new_state->src_y,
+ new_state->src_w, new_state->src_h);
/* atomic_check should have ensured that this doesn't fail */
WARN_ON(ret < 0);
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 83423092de2f..8c9f2f492178 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -5,6 +5,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
@@ -403,76 +404,84 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
}
static int mdp5_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- crtc = state->crtc ? state->crtc : plane->state->crtc;
+ crtc = new_plane_state->crtc ? new_plane_state->crtc : old_plane_state->crtc;
if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
- return mdp5_plane_atomic_check_with_state(crtc_state, state);
+ return mdp5_plane_atomic_check_with_state(crtc_state, new_plane_state);
}
static void mdp5_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
DBG("%s: update", plane->name);
- if (plane_enabled(state)) {
+ if (plane_enabled(new_state)) {
int ret;
ret = mdp5_plane_mode_set(plane,
- state->crtc, state->fb,
- &state->src, &state->dst);
+ new_state->crtc, new_state->fb,
+ &new_state->src, &new_state->dst);
/* atomic_check should have ensured that this doesn't fail */
WARN_ON(ret < 0);
}
}
static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(new_plane_state);
struct drm_crtc_state *crtc_state;
int min_scale, max_scale;
int ret;
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ new_plane_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
if (!crtc_state->active)
return -EINVAL;
- mdp5_state = to_mdp5_plane_state(state);
+ mdp5_state = to_mdp5_plane_state(new_plane_state);
/* don't use fast path if we don't have a hwpipe allocated yet */
if (!mdp5_state->hwpipe)
return -EINVAL;
/* only allow changing of position(crtc x/y or src x/y) in fast path */
- if (plane->state->crtc != state->crtc ||
- plane->state->src_w != state->src_w ||
- plane->state->src_h != state->src_h ||
- plane->state->crtc_w != state->crtc_w ||
- plane->state->crtc_h != state->crtc_h ||
+ if (plane->state->crtc != new_plane_state->crtc ||
+ plane->state->src_w != new_plane_state->src_w ||
+ plane->state->src_h != new_plane_state->src_h ||
+ plane->state->crtc_w != new_plane_state->crtc_w ||
+ plane->state->crtc_h != new_plane_state->crtc_h ||
!plane->state->fb ||
- plane->state->fb != state->fb)
+ plane->state->fb != new_plane_state->fb)
return -EINVAL;
min_scale = FRAC_16_16(1, 8);
max_scale = FRAC_16_16(8, 1);
- ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
min_scale, max_scale,
true, true);
if (ret)
@@ -485,15 +494,17 @@ static int mdp5_plane_atomic_async_check(struct drm_plane *plane,
* also assign/unassign the hwpipe(s) tied to the plane. We avoid
* taking the fast path for both these reasons.
*/
- if (state->visible != plane->state->visible)
+ if (new_plane_state->visible != plane->state->visible)
return -EINVAL;
return 0;
}
static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_framebuffer *old_fb = plane->state->fb;
plane->state->src_x = new_state->src_x;
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 6a326761dc4a..e9c6544b6a01 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -5,7 +5,7 @@
*/
#include <drm/drm_atomic_uapi.h>
-#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_vblank.h>
#include "msm_atomic_trace.h"
@@ -22,7 +22,7 @@ int msm_atomic_prepare_fb(struct drm_plane *plane,
if (!new_state->fb)
return 0;
- drm_gem_fb_prepare_fb(plane, new_state);
+ drm_gem_plane_helper_prepare_fb(plane, new_state);
return msm_framebuffer_prepare(new_state->fb, kms->aspace);
}
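drm_gem_fb_prepare_fb() moves into the new GEM atomic helper library as drm_gem_plane_helper_prepare_fb(); as in msm_atomic_prepare_fb() above, a driver with its own prepare_fb keeps calling it first to attach the BO's exclusive fence as the plane's implicit fence. Roughly, with foo_* as an illustrative driver:

#include <drm/drm_gem_atomic_helper.h>

static int foo_plane_prepare_fb(struct drm_plane *plane,
				struct drm_plane_state *new_state)
{
	int ret;

	if (!new_state->fb)
		return 0;

	/* Attach the framebuffer BO's exclusive fence as implicit fence. */
	ret = drm_gem_plane_helper_prepare_fb(plane, new_state);
	if (ret)
		return ret;

	/* ...driver-specific pinning / IOMMU mapping of new_state->fb... */
	return 0;
}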
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index 3e1bb0aefb87..300e7bab0f43 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -21,8 +21,8 @@
#include <drm/drm_encoder.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
@@ -402,12 +402,14 @@ static const struct drm_encoder_funcs mxsfb_encoder_funcs = {
*/
static int mxsfb_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *plane_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
struct drm_crtc_state *crtc_state;
- crtc_state = drm_atomic_get_new_crtc_state(plane_state->state,
+ crtc_state = drm_atomic_get_new_crtc_state(state,
&mxsfb->crtc);
return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
@@ -417,7 +419,7 @@ static int mxsfb_plane_atomic_check(struct drm_plane *plane,
}
static void mxsfb_plane_primary_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_pstate)
+ struct drm_atomic_state *state)
{
struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
dma_addr_t paddr;
@@ -428,10 +430,13 @@ static void mxsfb_plane_primary_atomic_update(struct drm_plane *plane,
}
static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_pstate)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_pstate = drm_atomic_get_old_plane_state(state,
+ plane);
struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
+ plane);
dma_addr_t paddr;
u32 ctrl;
@@ -460,7 +465,7 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
ctrl = AS_CTRL_AS_ENABLE | AS_CTRL_ALPHA(255);
- switch (state->fb->format->format) {
+ switch (new_pstate->fb->format->format) {
case DRM_FORMAT_XRGB4444:
ctrl |= AS_CTRL_FORMAT_RGB444 | AS_CTRL_ALPHA_CTRL_OVERRIDE;
break;
@@ -495,13 +500,13 @@ static bool mxsfb_format_mod_supported(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = {
- .prepare_fb = drm_gem_fb_prepare_fb,
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
.atomic_check = mxsfb_plane_atomic_check,
.atomic_update = mxsfb_plane_primary_atomic_update,
};
static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = {
- .prepare_fb = drm_gem_fb_prepare_fb,
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
.atomic_check = mxsfb_plane_atomic_check,
.atomic_update = mxsfb_plane_overlay_atomic_update,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 271de3a63f21..0cb1f9d848d3 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -30,6 +30,7 @@
#include <nvhw/class/cl507e.h>
#include <nvhw/class/clc37e.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
@@ -434,12 +435,15 @@ nv50_wndw_atomic_check_lut(struct nv50_wndw *wndw,
}
static int
-nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
+nv50_wndw_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct nouveau_drm *drm = nouveau_drm(plane->dev);
struct nv50_wndw *wndw = nv50_wndw(plane);
struct nv50_wndw_atom *armw = nv50_wndw_atom(wndw->plane.state);
- struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
struct nv50_head_atom *harm = NULL, *asyh = NULL;
bool modeset = false;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index fabb314a0b2f..281e9ed13989 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -43,9 +43,9 @@
#include <nvif/if500b.h>
#include <nvif/if900b.h>
-static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+static int nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
struct ttm_resource *reg);
-static void nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+static void nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
/*
* NV10-NV40 tiling helpers
@@ -300,18 +300,15 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
struct sg_table *sg, struct dma_resv *robj)
{
int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
- size_t acc_size;
int ret;
- acc_size = ttm_bo_dma_acc_size(nvbo->bo.bdev, size, sizeof(*nvbo));
-
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, domain, 0);
INIT_LIST_HEAD(&nvbo->io_reserve_lru);
ret = ttm_bo_init(nvbo->bo.bdev, &nvbo->bo, size, type,
- &nvbo->placement, align >> PAGE_SHIFT, false,
- acc_size, sg, robj, nouveau_bo_del_ttm);
+ &nvbo->placement, align >> PAGE_SHIFT, false, sg,
+ robj, nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
return ret;
@@ -699,7 +696,7 @@ nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
}
static int
-nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
struct ttm_resource *reg)
{
#if IS_ENABLED(CONFIG_AGP)
@@ -715,7 +712,7 @@ nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
}
static void
-nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
struct nouveau_drm *drm = nouveau_bdev(bdev);
@@ -1080,7 +1077,7 @@ nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
}
static int
-nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
+nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nvkm_device *device = nvxx_device(&drm->client.device);
@@ -1188,7 +1185,7 @@ out:
}
static void
-nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
+nouveau_ttm_io_mem_free(struct ttm_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
@@ -1248,7 +1245,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
}
static int
-nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
+nouveau_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct ttm_tt *ttm_dma = (void *)ttm;
@@ -1272,7 +1269,7 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
}
static void
-nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct nouveau_drm *drm;
@@ -1289,7 +1286,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
}
static void
-nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev,
+nouveau_ttm_tt_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
@@ -1321,7 +1318,7 @@ nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
nouveau_bo_move_ntfy(bo, false, NULL);
}
-struct ttm_bo_driver nouveau_bo_driver = {
+struct ttm_device_funcs nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
.ttm_tt_populate = &nouveau_ttm_tt_populate,
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 6045b85a762a..c2d3f9c48eba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -68,7 +68,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
return 0;
}
-extern struct ttm_bo_driver nouveau_bo_driver;
+extern struct ttm_device_funcs nouveau_bo_driver;
void nouveau_bo_move_init(struct nouveau_drm *);
struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 17831ee897ea..dac02c7be54d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -322,12 +322,9 @@ nouveau_framebuffer_new(struct drm_device *dev,
mode_cmd->pitches[0] >= 0x10000 || /* at most 64k pitch */
(mode_cmd->pitches[1] && /* pitches for planes must match */
mode_cmd->pitches[0] != mode_cmd->pitches[1]))) {
- struct drm_format_name_buf format_name;
- DRM_DEBUG_KMS("Unsuitable framebuffer: format: %s; pitches: 0x%x\n 0x%x\n",
- drm_get_format_name(mode_cmd->pixel_format,
- &format_name),
- mode_cmd->pitches[0],
- mode_cmd->pitches[1]);
+ DRM_DEBUG_KMS("Unsuitable framebuffer: format: %p4cc; pitches: 0x%x\n 0x%x\n",
+ &mode_cmd->pixel_format,
+ mode_cmd->pitches[0], mode_cmd->pitches[1]);
return -EINVAL;
}
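This hunk and the dpu_crtc.c hunk above switch to the %p4cc printk specifier, which formats a FourCC pixel-format code from a pointer to the 32-bit value, removing the need for drm_get_format_name() and its on-stack drm_format_name_buf. A minimal, illustrative use (foo_print_format is a placeholder):

#include <linux/printk.h>
#include <drm/drm_fourcc.h>

static void foo_print_format(const struct drm_format_info *info)
{
	/* %p4cc takes a pointer to the u32 fourcc and prints it readably. */
	pr_debug("scanout format: %p4cc\n", &info->format);
}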
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index d28ee6844245..ba65f136cf48 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -54,7 +54,6 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_memory.h>
#include <drm/drm_audio_component.h>
@@ -151,7 +150,7 @@ struct nouveau_drm {
/* TTM interface support */
struct {
- struct ttm_bo_device bdev;
+ struct ttm_device bdev;
atomic_t validate_sequence;
int (*move)(struct nouveau_channel *,
struct ttm_buffer_object *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 1cf52635ea74..256ec5b35473 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -16,7 +16,7 @@ struct nouveau_sgdma_be {
};
void
-nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
@@ -29,7 +29,7 @@ nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
}
int
-nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)
+nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nouveau_drm *drm = nouveau_bdev(bdev);
@@ -56,7 +56,7 @@ nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_re
}
void
-nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
if (nvbe->mem) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index a37bc3d7b38b..b81ae90b8449 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -154,7 +154,7 @@ error_unlock:
return ret;
}
-static struct vm_operations_struct nouveau_ttm_vm_ops = {
+static const struct vm_operations_struct nouveau_ttm_vm_ops = {
.fault = nouveau_ttm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
@@ -324,10 +324,10 @@ nouveau_ttm_init(struct nouveau_drm *drm)
need_swiotlb = !!swiotlb_nr_tbl();
#endif
- ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver,
- drm->dev->dev, dev->anon_inode->i_mapping,
- dev->vma_offset_manager, need_swiotlb,
- drm->client.mmu.dmabits <= 32);
+ ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
+ dev->anon_inode->i_mapping,
+ dev->vma_offset_manager, need_swiotlb,
+ drm->client.mmu.dmabits <= 32);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
return ret;
@@ -377,7 +377,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
nouveau_ttm_fini_vram(drm);
nouveau_ttm_fini_gtt(drm);
- ttm_bo_device_release(&drm->ttm.bdev);
+ ttm_device_fini(&drm->ttm.bdev);
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.h b/drivers/gpu/drm/nouveau/nouveau_ttm.h
index 69552049bb96..dbf6dc238efd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.h
@@ -3,7 +3,7 @@
#define __NOUVEAU_TTM_H__
static inline struct nouveau_drm *
-nouveau_bdev(struct ttm_bo_device *bd)
+nouveau_bdev(struct ttm_device *bd)
{
return container_of(bd, struct nouveau_drm, ttm.bdev);
}
@@ -22,7 +22,7 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
int nouveau_ttm_global_init(struct nouveau_drm *);
void nouveau_ttm_global_release(struct nouveau_drm *);
-int nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg);
-void nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
-void nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+int nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg);
+void nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
+void nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
#endif
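The nouveau changes above follow the TTM rename: struct ttm_bo_device becomes struct ttm_device, the driver vtable becomes struct ttm_device_funcs, setup and teardown go through ttm_device_init()/ttm_device_fini(), and the acc_size accounting argument is dropped from ttm_bo_init(). A condensed sketch of the embedding driver side, mirroring the call shape shown in nouveau_ttm.c (foo_* names are placeholders):

#include <drm/drm_device.h>
#include <drm/ttm/ttm_bo_driver.h>

struct foo_drm {
	struct drm_device *dev;
	struct ttm_device bdev;			/* was struct ttm_bo_device */
};

extern struct ttm_device_funcs foo_bo_driver;	/* was struct ttm_bo_driver */

static int foo_ttm_init(struct foo_drm *drm, bool need_swiotlb, bool use_dma32)
{
	return ttm_device_init(&drm->bdev, &foo_bo_driver, drm->dev->dev,
			       drm->dev->anon_inode->i_mapping,
			       drm->dev->vma_offset_manager,
			       need_swiotlb, use_dma32);
}

static void foo_ttm_fini(struct foo_drm *drm)
{
	ttm_device_fini(&drm->bdev);		/* was ttm_bo_device_release() */
}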
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 8e11612f5fe1..febcc87ddfe1 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -2149,11 +2149,12 @@ static int dsi_vc_send_short(struct dsi_data *dsi, int vc,
const struct mipi_dsi_msg *msg)
{
struct mipi_dsi_packet pkt;
+ int err;
u32 r;
- r = mipi_dsi_create_packet(&pkt, msg);
- if (r < 0)
- return r;
+ err = mipi_dsi_create_packet(&pkt, msg);
+ if (err)
+ return err;
WARN_ON(!dsi_bus_is_locked(dsi));
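The dsi.c fix above matters because the return value was stored in an unsigned variable, so a negative errno from mipi_dsi_create_packet() could never satisfy the `< 0` check. The corrected pattern in isolation (foo_send_short is illustrative; the FIFO write is elided):

#include <drm/drm_mipi_dsi.h>

static int foo_send_short(const struct mipi_dsi_msg *msg)
{
	struct mipi_dsi_packet pkt;
	int err;	/* must be signed: mipi_dsi_create_packet() returns -EINVAL etc. */

	err = mipi_dsi_create_packet(&pkt, msg);
	if (err)
		return err;

	/* ...write pkt.header / pkt.payload to the hardware FIFO... */
	return 0;
}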
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 28bbad1353ee..8632139e0f01 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -68,6 +68,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
struct omap_drm_private *priv = dev->dev_private;
+ bool fence_cookie = dma_fence_begin_signalling();
dispc_runtime_get(priv->dispc);
@@ -90,8 +91,6 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
omap_atomic_wait_for_completion(dev, old_state);
drm_atomic_helper_commit_planes(dev, old_state, 0);
-
- drm_atomic_helper_commit_hw_done(old_state);
} else {
/*
* OMAP3 DSS seems to have issues with the work-around above,
@@ -101,10 +100,12 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_planes(dev, old_state, 0);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
-
- drm_atomic_helper_commit_hw_done(old_state);
}
+ drm_atomic_helper_commit_hw_done(old_state);
+
+ dma_fence_end_signalling(fence_cookie);
+
/*
* Wait for completion of the page flips to ensure that old buffers
* can't be touched by the hardware anymore before cleaning up planes.
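omap's commit tail now brackets the commit work with the dma-fence signalling annotations so lockdep can flag dependencies that could stall fence signalling, and drm_atomic_helper_commit_hw_done() is called once on the common path instead of in each branch. The overall shape of an annotated commit tail, sketched with the driver-specific work-arounds elided:

#include <linux/dma-fence.h>
#include <drm/drm_atomic_helper.h>

static void foo_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	bool fence_cookie = dma_fence_begin_signalling();

	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state, 0);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);

	drm_atomic_helper_commit_hw_done(old_state);
	dma_fence_end_signalling(fence_cookie);

	drm_atomic_helper_wait_for_flip_done(dev, old_state);
	drm_atomic_helper_cleanup_planes(dev, old_state);
}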
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 51dc24acea73..801da917507d 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -40,30 +40,32 @@ static void omap_plane_cleanup_fb(struct drm_plane *plane,
}
static void omap_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct omap_drm_private *priv = plane->dev->dev_private;
struct omap_plane *omap_plane = to_omap_plane(plane);
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct omap_overlay_info info;
int ret;
- DBG("%s, crtc=%p fb=%p", omap_plane->name, state->crtc, state->fb);
+ DBG("%s, crtc=%p fb=%p", omap_plane->name, new_state->crtc,
+ new_state->fb);
memset(&info, 0, sizeof(info));
info.rotation_type = OMAP_DSS_ROT_NONE;
info.rotation = DRM_MODE_ROTATE_0;
- info.global_alpha = state->alpha >> 8;
- info.zorder = state->normalized_zpos;
- if (state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
+ info.global_alpha = new_state->alpha >> 8;
+ info.zorder = new_state->normalized_zpos;
+ if (new_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
info.pre_mult_alpha = 1;
else
info.pre_mult_alpha = 0;
- info.color_encoding = state->color_encoding;
- info.color_range = state->color_range;
+ info.color_encoding = new_state->color_encoding;
+ info.color_range = new_state->color_range;
/* update scanout: */
- omap_framebuffer_update_scanout(state->fb, state, &info);
+ omap_framebuffer_update_scanout(new_state->fb, new_state, &info);
DBG("%dx%d -> %dx%d (%d)", info.width, info.height,
info.out_width, info.out_height,
@@ -73,8 +75,8 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
/* and finally, update omapdss: */
ret = dispc_ovl_setup(priv->dispc, omap_plane->id, &info,
- omap_crtc_timings(state->crtc), false,
- omap_crtc_channel(state->crtc));
+ omap_crtc_timings(new_state->crtc), false,
+ omap_crtc_channel(new_state->crtc));
if (ret) {
dev_err(plane->dev->dev, "Failed to setup plane %s\n",
omap_plane->name);
@@ -86,31 +88,35 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
}
static void omap_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct omap_drm_private *priv = plane->dev->dev_private;
struct omap_plane *omap_plane = to_omap_plane(plane);
- plane->state->rotation = DRM_MODE_ROTATE_0;
- plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY
- ? 0 : omap_plane->id;
+ new_state->rotation = DRM_MODE_ROTATE_0;
+ new_state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : omap_plane->id;
dispc_ovl_enable(priv->dispc, omap_plane->id, false);
}
static int omap_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state;
- if (!state->fb)
+ if (!new_plane_state->fb)
return 0;
- /* crtc should only be NULL when disabling (i.e., !state->fb) */
- if (WARN_ON(!state->crtc))
+ /* crtc should only be NULL when disabling (i.e., !new_plane_state->fb) */
+ if (WARN_ON(!new_plane_state->crtc))
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ new_plane_state->crtc);
/* we should have a crtc state if the plane is attached to a crtc */
if (WARN_ON(!crtc_state))
return 0;
@@ -118,17 +124,17 @@ static int omap_plane_atomic_check(struct drm_plane *plane,
if (!crtc_state->enable)
return 0;
- if (state->crtc_x < 0 || state->crtc_y < 0)
+ if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0)
return -EINVAL;
- if (state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay)
+ if (new_plane_state->crtc_x + new_plane_state->crtc_w > crtc_state->adjusted_mode.hdisplay)
return -EINVAL;
- if (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)
+ if (new_plane_state->crtc_y + new_plane_state->crtc_h > crtc_state->adjusted_mode.vdisplay)
return -EINVAL;
- if (state->rotation != DRM_MODE_ROTATE_0 &&
- !omap_framebuffer_supports_rotation(state->fb))
+ if (new_plane_state->rotation != DRM_MODE_ROTATE_0 &&
+ !omap_framebuffer_supports_rotation(new_plane_state->fb))
return -EINVAL;
return 0;
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index 66c7d765b8f7..59a8d99e777d 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -244,7 +244,7 @@ static int panel_lvds_probe(struct platform_device *pdev)
static int panel_lvds_remove(struct platform_device *pdev)
{
- struct panel_lvds *lvds = dev_get_drvdata(&pdev->dev);
+ struct panel_lvds *lvds = platform_get_drvdata(pdev);
drm_panel_remove(&lvds->panel);
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 0ee508576231..3939b25e6666 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -267,7 +267,7 @@ static int seiko_panel_probe(struct device *dev,
static int seiko_panel_remove(struct platform_device *pdev)
{
- struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
+ struct seiko_panel *panel = platform_get_drvdata(pdev);
drm_panel_remove(&panel->base);
drm_panel_disable(&panel->base);
@@ -277,7 +277,7 @@ static int seiko_panel_remove(struct platform_device *pdev)
static void seiko_panel_shutdown(struct platform_device *pdev)
{
- struct seiko_panel *panel = dev_get_drvdata(&pdev->dev);
+ struct seiko_panel *panel = platform_get_drvdata(pdev);
drm_panel_disable(&panel->base);
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 4e2dad314c79..9858079f9e14 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -4800,7 +4800,7 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
err = mipi_dsi_attach(dsi);
if (err) {
- struct panel_simple *panel = dev_get_drvdata(&dsi->dev);
+ struct panel_simple *panel = mipi_dsi_get_drvdata(dsi);
drm_panel_remove(&panel->base);
}
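The three panel fixes above are the same one-liner: for a platform device, platform_get_drvdata(pdev) is the canonical accessor and is equivalent to dev_get_drvdata(&pdev->dev), just as mipi_dsi_get_drvdata() is for a DSI device. A tiny sketch (struct foo_panel is a stand-in):

#include <linux/platform_device.h>
#include <drm/drm_panel.h>

struct foo_panel {
	struct drm_panel base;
};

static int foo_panel_remove(struct platform_device *pdev)
{
	/* Same pointer stored with platform_set_drvdata() in probe. */
	struct foo_panel *panel = platform_get_drvdata(pdev);

	drm_panel_remove(&panel->base);
	return 0;
}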
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 56b3f5935703..7c5ffc81dce1 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -130,8 +130,16 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
panfrost_devfreq_profile.initial_freq = cur_freq;
dev_pm_opp_put(opp);
+ /*
+ * Setup default thresholds for the simple_ondemand governor.
+ * The values are chosen based on experiments.
+ */
+ pfdevfreq->gov_data.upthreshold = 45;
+ pfdevfreq->gov_data.downdifferential = 5;
+
devfreq = devm_devfreq_add_device(dev, &panfrost_devfreq_profile,
- DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &pfdevfreq->gov_data);
if (IS_ERR(devfreq)) {
DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
ret = PTR_ERR(devfreq);
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
index db6ea48e21f9..1e2a4de941aa 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -4,6 +4,7 @@
#ifndef __PANFROST_DEVFREQ_H__
#define __PANFROST_DEVFREQ_H__
+#include <linux/devfreq.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
@@ -17,6 +18,7 @@ struct panfrost_devfreq {
struct devfreq *devfreq;
struct opp_table *regulators_opp_table;
struct thermal_cooling_device *cooling;
+ struct devfreq_simple_ondemand_data gov_data;
bool opp_of_table_added;
ktime_t busy_time;
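The devfreq change passes tuning data for the simple_ondemand governor instead of NULL: struct devfreq_simple_ondemand_data carries the percent-busy thresholds the governor uses when deciding whether to raise or lower the frequency. A minimal sketch of the registration, with names and values purely illustrative:

#include <linux/devfreq.h>
#include <linux/err.h>

static int foo_devfreq_init(struct device *dev,
			    struct devfreq_dev_profile *profile,
			    struct devfreq_simple_ondemand_data *gov_data)
{
	struct devfreq *devfreq;

	/* Percent-busy thresholds consumed by the simple_ondemand governor. */
	gov_data->upthreshold = 45;
	gov_data->downdifferential = 5;

	devfreq = devm_devfreq_add_device(dev, profile,
					  DEVFREQ_GOV_SIMPLE_ONDEMAND, gov_data);
	return PTR_ERR_OR_ZERO(devfreq);
}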
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 04e6f6f9b742..6003cfeb1322 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -432,7 +432,8 @@ static void panfrost_scheduler_start(struct panfrost_queue_state *queue)
mutex_unlock(&queue->lock);
}
-static void panfrost_job_timedout(struct drm_sched_job *sched_job)
+static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
+ *sched_job)
{
struct panfrost_job *job = to_panfrost_job(sched_job);
struct panfrost_device *pfdev = job->pfdev;
@@ -443,7 +444,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
* spurious. Bail out.
*/
if (dma_fence_is_signaled(job->done_fence))
- return;
+ return DRM_GPU_SCHED_STAT_NOMINAL;
dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
js,
@@ -455,11 +456,13 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
/* Scheduler is already stopped, nothing to do. */
if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
- return;
+ return DRM_GPU_SCHED_STAT_NOMINAL;
/* Schedule a reset if there's no reset in progress. */
if (!atomic_xchg(&pfdev->reset.pending, 1))
schedule_work(&pfdev->reset.work);
+
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
static const struct drm_sched_backend_ops panfrost_sched_ops = {
@@ -624,7 +627,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
ret = drm_sched_init(&js->queue[j].sched,
&panfrost_sched_ops,
1, 0, msecs_to_jiffies(JOB_TIMEOUT_MS),
- "pan_js");
+ NULL, "pan_js");
if (ret) {
dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
goto err_sched;
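The scheduler timeout handler now reports back to drm_sched: it returns enum drm_gpu_sched_stat, with DRM_GPU_SCHED_STAT_NOMINAL meaning the device is expected to keep operating (including the spurious-timeout case), and drm_sched_init() gains one more argument before the name in this series. A sketch of a converted handler; struct foo_job and its done_fence are placeholders standing in for the driver's job type:

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

struct foo_job {
	struct drm_sched_job base;
	struct dma_fence *done_fence;
};

static enum drm_gpu_sched_stat foo_job_timedout(struct drm_sched_job *sched_job)
{
	struct foo_job *job = container_of(sched_job, struct foo_job, base);

	/* A signalled job means the timeout raced with completion: spurious. */
	if (dma_fence_is_signaled(job->done_fence))
		return DRM_GPU_SCHED_STAT_NOMINAL;

	/* ...stop the queue and schedule a device reset here... */

	return DRM_GPU_SCHED_STAT_NOMINAL;
}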
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 7c1b3481b785..0581186ebfb3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -488,8 +488,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
}
bo->base.pages = pages;
bo->base.pages_use_count = 1;
- } else
+ } else {
pages = bo->base.pages;
+ if (pages[page_offset]) {
+ /* Pages are already mapped, bail out. */
+ mutex_unlock(&bo->base.pages_lock);
+ goto out;
+ }
+ }
mapping = bo->base.base.filp->f_mapping;
mapping_set_unevictable(mapping);
@@ -522,6 +528,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
+out:
panfrost_gem_mapping_put(bomapping);
return 0;
@@ -571,32 +578,32 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
struct panfrost_device *pfdev = data;
u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
- int i, ret;
+ int ret;
- for (i = 0; status; i++) {
- u32 mask = BIT(i) | BIT(i + 16);
+ while (status) {
+ u32 as = ffs(status | (status >> 16)) - 1;
+ u32 mask = BIT(as) | BIT(as + 16);
u64 addr;
u32 fault_status;
u32 exception_type;
u32 access_type;
u32 source_id;
- if (!(status & mask))
- continue;
-
- fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
- addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
- addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;
+ fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as));
+ addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as));
+ addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32;
/* decode the fault status */
exception_type = fault_status & 0xFF;
access_type = (fault_status >> 8) & 0x3;
source_id = (fault_status >> 16);
+ mmu_write(pfdev, MMU_INT_CLEAR, mask);
+
/* Page fault only */
ret = -1;
- if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
- ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
+ if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0)
+ ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);
if (ret)
/* terminal fault, print info about the fault */
@@ -608,7 +615,7 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
"exception type 0x%X: %s\n"
"access type 0x%X: %s\n"
"source id 0x%X\n",
- i, addr,
+ as, addr,
"TODO",
fault_status,
(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
@@ -616,9 +623,11 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
access_type, access_type_name(pfdev, fault_status),
source_id);
- mmu_write(pfdev, MMU_INT_CLEAR, mask);
-
status &= ~mask;
+
+ /* If we received new MMU interrupts, process them before returning. */
+ if (!status)
+ status = mmu_read(pfdev, MMU_INT_RAWSTAT);
}
mmu_write(pfdev, MMU_INT_MASK, ~0);
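The interrupt-handler rework above walks the RAWSTAT bits with ffs() so each address space is serviced by number rather than by a fixed 0..N scan, acknowledges the interrupt before handling the fault, and re-reads RAWSTAT before leaving so faults raised in the meantime are not lost. The bit-walking idiom in isolation (foo_handle_pending is illustrative; the per-AS handling is elided):

#include <linux/bitops.h>
#include <linux/types.h>

static void foo_handle_pending(u32 status)
{
	while (status) {
		/* Lowest pending AS; bit n and bit n+16 belong to the same AS. */
		u32 as = ffs(status | (status >> 16)) - 1;
		u32 mask = BIT(as) | BIT(as + 16);

		/* ...acknowledge and service address space 'as' here... */

		status &= ~mask;
	}
}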
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 69c02e7c82b7..6fd7f13f1aca 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -17,8 +17,8 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
#include "pl111_drm.h"
@@ -440,7 +440,7 @@ static struct drm_simple_display_pipe_funcs pl111_display_funcs = {
.enable = pl111_display_enable,
.disable = pl111_display_disable,
.update = pl111_display_update,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
};
static int pl111_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 54e3c3a97440..7b00c955cd82 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -254,6 +254,7 @@ int qxl_garbage_collect(struct qxl_device *qdev)
}
}
+ wake_up_all(&qdev->release_event);
DRM_DEBUG_DRIVER("%d\n", i);
return i;
@@ -268,7 +269,7 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev,
int ret;
ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
- false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
+ false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo);
if (ret) {
DRM_ERROR("failed to allocate VRAM BO\n");
return ret;
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 10738e04c09b..a7637e79cb42 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -464,25 +464,26 @@ static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
};
static int qxl_primary_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct qxl_device *qdev = to_qxl(plane->dev);
struct qxl_bo *bo;
- if (!state->crtc || !state->fb)
+ if (!new_plane_state->crtc || !new_plane_state->fb)
return 0;
- bo = gem_to_qxl_bo(state->fb->obj[0]);
+ bo = gem_to_qxl_bo(new_plane_state->fb->obj[0]);
return qxl_check_framebuffer(qdev, bo);
}
-static int qxl_primary_apply_cursor(struct drm_plane *plane)
+static int qxl_primary_apply_cursor(struct qxl_device *qdev,
+ struct drm_plane_state *plane_state)
{
- struct drm_device *dev = plane->dev;
- struct qxl_device *qdev = to_qxl(dev);
- struct drm_framebuffer *fb = plane->state->fb;
- struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc);
struct qxl_cursor_cmd *cmd;
struct qxl_release *release;
int ret = 0;
@@ -506,8 +507,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane)
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
- cmd->u.set.position.x = plane->state->crtc_x + fb->hot_x;
- cmd->u.set.position.y = plane->state->crtc_y + fb->hot_y;
+ cmd->u.set.position.x = plane_state->crtc_x + fb->hot_x;
+ cmd->u.set.position.y = plane_state->crtc_y + fb->hot_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0);
@@ -524,17 +525,126 @@ out_free_release:
return ret;
}
+static int qxl_primary_move_cursor(struct qxl_device *qdev,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc);
+ struct qxl_cursor_cmd *cmd;
+ struct qxl_release *release;
+ int ret = 0;
+
+ if (!qcrtc->cursor_bo)
+ return 0;
+
+ ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
+ QXL_RELEASE_CURSOR_CMD,
+ &release, NULL);
+ if (ret)
+ return ret;
+
+ ret = qxl_release_reserve_list(release, true);
+ if (ret) {
+ qxl_release_free(qdev, release);
+ return ret;
+ }
+
+ cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
+ cmd->type = QXL_CURSOR_MOVE;
+ cmd->u.position.x = plane_state->crtc_x + fb->hot_x;
+ cmd->u.position.y = plane_state->crtc_y + fb->hot_y;
+ qxl_release_unmap(qdev, release, &cmd->release_info);
+
+ qxl_release_fence_buffer_objects(release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+ return ret;
+}
+
+static struct qxl_bo *qxl_create_cursor(struct qxl_device *qdev,
+ struct qxl_bo *user_bo,
+ int hot_x, int hot_y)
+{
+ static const u32 size = 64 * 64 * 4;
+ struct qxl_bo *cursor_bo;
+ struct dma_buf_map cursor_map;
+ struct dma_buf_map user_map;
+ struct qxl_cursor cursor;
+ int ret;
+
+ if (!user_bo)
+ return NULL;
+
+ ret = qxl_bo_create(qdev, sizeof(struct qxl_cursor) + size,
+ false, true, QXL_GEM_DOMAIN_VRAM, 1,
+ NULL, &cursor_bo);
+ if (ret)
+ goto err;
+
+ ret = qxl_bo_vmap(cursor_bo, &cursor_map);
+ if (ret)
+ goto err_unref;
+
+ ret = qxl_bo_vmap(user_bo, &user_map);
+ if (ret)
+ goto err_unmap;
+
+ cursor.header.unique = 0;
+ cursor.header.type = SPICE_CURSOR_TYPE_ALPHA;
+ cursor.header.width = 64;
+ cursor.header.height = 64;
+ cursor.header.hot_spot_x = hot_x;
+ cursor.header.hot_spot_y = hot_y;
+ cursor.data_size = size;
+ cursor.chunk.next_chunk = 0;
+ cursor.chunk.prev_chunk = 0;
+ cursor.chunk.data_size = size;
+ if (cursor_map.is_iomem) {
+ memcpy_toio(cursor_map.vaddr_iomem,
+ &cursor, sizeof(cursor));
+ memcpy_toio(cursor_map.vaddr_iomem + sizeof(cursor),
+ user_map.vaddr, size);
+ } else {
+ memcpy(cursor_map.vaddr,
+ &cursor, sizeof(cursor));
+ memcpy(cursor_map.vaddr + sizeof(cursor),
+ user_map.vaddr, size);
+ }
+
+ qxl_bo_vunmap(user_bo);
+ qxl_bo_vunmap(cursor_bo);
+ return cursor_bo;
+
+err_unmap:
+ qxl_bo_vunmap(cursor_bo);
+err_unref:
+ qxl_bo_unpin(cursor_bo);
+ qxl_bo_unref(&cursor_bo);
+err:
+ return NULL;
+}
+
+static void qxl_free_cursor(struct qxl_bo *cursor_bo)
+{
+ if (!cursor_bo)
+ return;
+
+ qxl_bo_unpin(cursor_bo);
+ qxl_bo_unref(&cursor_bo);
+}
+
static void qxl_primary_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct qxl_device *qdev = to_qxl(plane->dev);
- struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]);
+ struct qxl_bo *bo = gem_to_qxl_bo(new_state->fb->obj[0]);
struct qxl_bo *primary;
struct drm_clip_rect norect = {
.x1 = 0,
.y1 = 0,
- .x2 = plane->state->fb->width,
- .y2 = plane->state->fb->height
+ .x2 = new_state->fb->width,
+ .y2 = new_state->fb->height
};
uint32_t dumb_shadow_offset = 0;
@@ -544,25 +654,29 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
if (qdev->primary_bo)
qxl_io_destroy_primary(qdev);
qxl_io_create_primary(qdev, primary);
- qxl_primary_apply_cursor(plane);
+ qxl_primary_apply_cursor(qdev, plane->state);
}
if (bo->is_dumb)
dumb_shadow_offset =
- qdev->dumb_heads[plane->state->crtc->index].x;
+ qdev->dumb_heads[new_state->crtc->index].x;
- qxl_draw_dirty_fb(qdev, plane->state->fb, bo, 0, 0, &norect, 1, 1,
+ qxl_draw_dirty_fb(qdev, new_state->fb, bo, 0, 0, &norect, 1, 1,
dumb_shadow_offset);
}
static void qxl_primary_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct qxl_device *qdev = to_qxl(plane->dev);
if (old_state->fb) {
struct qxl_bo *bo = gem_to_qxl_bo(old_state->fb->obj[0]);
+ if (bo->shadow)
+ bo = bo->shadow;
if (bo->is_primary) {
qxl_io_destroy_primary(qdev);
bo->is_primary = false;
@@ -571,126 +685,29 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane,
}
static void qxl_cursor_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_device *dev = plane->dev;
- struct qxl_device *qdev = to_qxl(dev);
- struct drm_framebuffer *fb = plane->state->fb;
- struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
- struct qxl_release *release;
- struct qxl_cursor_cmd *cmd;
- struct qxl_cursor *cursor;
- struct drm_gem_object *obj;
- struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
- int ret;
- struct dma_buf_map user_map;
- struct dma_buf_map cursor_map;
- void *user_ptr;
- int size = 64*64*4;
-
- ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
- QXL_RELEASE_CURSOR_CMD,
- &release, NULL);
- if (ret)
- return;
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct qxl_device *qdev = to_qxl(plane->dev);
+ struct drm_framebuffer *fb = new_state->fb;
if (fb != old_state->fb) {
- obj = fb->obj[0];
- user_bo = gem_to_qxl_bo(obj);
-
- /* pinning is done in the prepare/cleanup framevbuffer */
- ret = qxl_bo_kmap(user_bo, &user_map);
- if (ret)
- goto out_free_release;
- user_ptr = user_map.vaddr; /* TODO: Use mapping abstraction properly */
-
- ret = qxl_alloc_bo_reserved(qdev, release,
- sizeof(struct qxl_cursor) + size,
- &cursor_bo);
- if (ret)
- goto out_kunmap;
-
- ret = qxl_bo_pin(cursor_bo);
- if (ret)
- goto out_free_bo;
-
- ret = qxl_release_reserve_list(release, true);
- if (ret)
- goto out_unpin;
-
- ret = qxl_bo_kmap(cursor_bo, &cursor_map);
- if (ret)
- goto out_backoff;
- if (cursor_map.is_iomem) /* TODO: Use mapping abstraction properly */
- cursor = (struct qxl_cursor __force *)cursor_map.vaddr_iomem;
- else
- cursor = (struct qxl_cursor *)cursor_map.vaddr;
-
- cursor->header.unique = 0;
- cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
- cursor->header.width = 64;
- cursor->header.height = 64;
- cursor->header.hot_spot_x = fb->hot_x;
- cursor->header.hot_spot_y = fb->hot_y;
- cursor->data_size = size;
- cursor->chunk.next_chunk = 0;
- cursor->chunk.prev_chunk = 0;
- cursor->chunk.data_size = size;
- memcpy(cursor->chunk.data, user_ptr, size);
- qxl_bo_kunmap(cursor_bo);
- qxl_bo_kunmap(user_bo);
-
- cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
- cmd->u.set.visible = 1;
- cmd->u.set.shape = qxl_bo_physical_address(qdev,
- cursor_bo, 0);
- cmd->type = QXL_CURSOR_SET;
-
- old_cursor_bo = qcrtc->cursor_bo;
- qcrtc->cursor_bo = cursor_bo;
- cursor_bo = NULL;
+ qxl_primary_apply_cursor(qdev, new_state);
} else {
-
- ret = qxl_release_reserve_list(release, true);
- if (ret)
- goto out_free_release;
-
- cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
- cmd->type = QXL_CURSOR_MOVE;
+ qxl_primary_move_cursor(qdev, new_state);
}
-
- cmd->u.position.x = plane->state->crtc_x + fb->hot_x;
- cmd->u.position.y = plane->state->crtc_y + fb->hot_y;
-
- qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_release_fence_buffer_objects(release);
- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
-
- if (old_cursor_bo != NULL)
- qxl_bo_unpin(old_cursor_bo);
- qxl_bo_unref(&old_cursor_bo);
- qxl_bo_unref(&cursor_bo);
-
- return;
-
-out_backoff:
- qxl_release_backoff_reserve_list(release);
-out_unpin:
- qxl_bo_unpin(cursor_bo);
-out_free_bo:
- qxl_bo_unref(&cursor_bo);
-out_kunmap:
- qxl_bo_kunmap(user_bo);
-out_free_release:
- qxl_release_free(qdev, release);
- return;
-
}
static void qxl_cursor_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct qxl_device *qdev = to_qxl(plane->dev);
+ struct qxl_crtc *qcrtc;
struct qxl_release *release;
struct qxl_cursor_cmd *cmd;
int ret;
@@ -713,6 +730,10 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
qxl_release_fence_buffer_objects(release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
+
+ qcrtc = to_qxl_crtc(old_state->crtc);
+ qxl_free_cursor(qcrtc->cursor_bo);
+ qcrtc->cursor_bo = NULL;
}
static void qxl_update_dumb_head(struct qxl_device *qdev,
@@ -770,13 +791,45 @@ static void qxl_calc_dumb_shadow(struct qxl_device *qdev,
DRM_DEBUG("%dx%d\n", surf->width, surf->height);
}
+static void qxl_prepare_shadow(struct qxl_device *qdev, struct qxl_bo *user_bo,
+ int crtc_index)
+{
+ struct qxl_surface surf;
+
+ qxl_update_dumb_head(qdev, crtc_index,
+ user_bo);
+ qxl_calc_dumb_shadow(qdev, &surf);
+ if (!qdev->dumb_shadow_bo ||
+ qdev->dumb_shadow_bo->surf.width != surf.width ||
+ qdev->dumb_shadow_bo->surf.height != surf.height) {
+ if (qdev->dumb_shadow_bo) {
+ drm_gem_object_put
+ (&qdev->dumb_shadow_bo->tbo.base);
+ qdev->dumb_shadow_bo = NULL;
+ }
+ qxl_bo_create(qdev, surf.height * surf.stride,
+ true, true, QXL_GEM_DOMAIN_SURFACE, 0,
+ &surf, &qdev->dumb_shadow_bo);
+ }
+ if (user_bo->shadow != qdev->dumb_shadow_bo) {
+ if (user_bo->shadow) {
+ qxl_bo_unpin(user_bo->shadow);
+ drm_gem_object_put
+ (&user_bo->shadow->tbo.base);
+ user_bo->shadow = NULL;
+ }
+ drm_gem_object_get(&qdev->dumb_shadow_bo->tbo.base);
+ user_bo->shadow = qdev->dumb_shadow_bo;
+ qxl_bo_pin(user_bo->shadow);
+ }
+}
+
static int qxl_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct qxl_device *qdev = to_qxl(plane->dev);
struct drm_gem_object *obj;
struct qxl_bo *user_bo;
- struct qxl_surface surf;
if (!new_state->fb)
return 0;
@@ -786,30 +839,18 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
user_bo->is_dumb) {
- qxl_update_dumb_head(qdev, new_state->crtc->index,
- user_bo);
- qxl_calc_dumb_shadow(qdev, &surf);
- if (!qdev->dumb_shadow_bo ||
- qdev->dumb_shadow_bo->surf.width != surf.width ||
- qdev->dumb_shadow_bo->surf.height != surf.height) {
- if (qdev->dumb_shadow_bo) {
- drm_gem_object_put
- (&qdev->dumb_shadow_bo->tbo.base);
- qdev->dumb_shadow_bo = NULL;
- }
- qxl_bo_create(qdev, surf.height * surf.stride,
- true, true, QXL_GEM_DOMAIN_SURFACE, &surf,
- &qdev->dumb_shadow_bo);
- }
- if (user_bo->shadow != qdev->dumb_shadow_bo) {
- if (user_bo->shadow) {
- drm_gem_object_put
- (&user_bo->shadow->tbo.base);
- user_bo->shadow = NULL;
- }
- drm_gem_object_get(&qdev->dumb_shadow_bo->tbo.base);
- user_bo->shadow = qdev->dumb_shadow_bo;
- }
+ qxl_prepare_shadow(qdev, user_bo, new_state->crtc->index);
+ }
+
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ plane->state->fb != new_state->fb) {
+ struct qxl_crtc *qcrtc = to_qxl_crtc(new_state->crtc);
+ struct qxl_bo *old_cursor_bo = qcrtc->cursor_bo;
+
+ qcrtc->cursor_bo = qxl_create_cursor(qdev, user_bo,
+ new_state->fb->hot_x,
+ new_state->fb->hot_y);
+ qxl_free_cursor(old_cursor_bo);
}
return qxl_bo_pin(user_bo);
@@ -834,6 +875,7 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
qxl_bo_unpin(user_bo);
if (old_state->fb != plane->state->fb && user_bo->shadow) {
+ qxl_bo_unpin(user_bo->shadow);
drm_gem_object_put(&user_bo->shadow->tbo.base);
user_bo->shadow = NULL;
}
@@ -1155,12 +1197,10 @@ int qxl_create_monitors_object(struct qxl_device *qdev)
}
qdev->monitors_config_bo = gem_to_qxl_bo(gobj);
- ret = qxl_bo_pin(qdev->monitors_config_bo);
+ ret = qxl_bo_vmap(qdev->monitors_config_bo, &map);
if (ret)
return ret;
- qxl_bo_kmap(qdev->monitors_config_bo, &map);
-
qdev->monitors_config = qdev->monitors_config_bo->kptr;
qdev->ram_header->monitors_config =
qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0);
@@ -1179,11 +1219,13 @@ int qxl_destroy_monitors_object(struct qxl_device *qdev)
{
int ret;
+ if (!qdev->monitors_config_bo)
+ return 0;
+
qdev->monitors_config = NULL;
qdev->ram_header->monitors_config = 0;
- qxl_bo_kunmap(qdev->monitors_config_bo);
- ret = qxl_bo_unpin(qdev->monitors_config_bo);
+ ret = qxl_bo_vunmap(qdev->monitors_config_bo);
if (ret)
return ret;
@@ -1196,7 +1238,9 @@ int qxl_modeset_init(struct qxl_device *qdev)
int i;
int ret;
- drm_mode_config_init(&qdev->ddev);
+ ret = drmm_mode_config_init(&qdev->ddev);
+ if (ret)
+ return ret;
ret = qxl_create_monitors_object(qdev);
if (ret)
@@ -1228,6 +1272,10 @@ int qxl_modeset_init(struct qxl_device *qdev)
void qxl_modeset_fini(struct qxl_device *qdev)
{
+ if (qdev->dumb_shadow_bo) {
+ qxl_bo_unpin(qdev->dumb_shadow_bo);
+ drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base);
+ qdev->dumb_shadow_bo = NULL;
+ }
qxl_destroy_monitors_object(qdev);
- drm_mode_config_cleanup(&qdev->ddev);
}
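The cursor creation helper above copies the header and pixel data through a dma_buf_map and branches on is_iomem by hand, much like the existing "Use mapping abstraction properly" TODOs elsewhere in the driver. A minimal sketch of the same copy expressed through the mapping helpers, assuming dma_buf_map_memcpy_to() and dma_buf_map_incr() from <linux/dma-buf-map.h> are available in this tree (hypothetical rewrite, not part of the patch):

	/* hypothetical replacement for the is_iomem branch in qxl_create_cursor() */
	struct dma_buf_map dst = cursor_map;

	dma_buf_map_memcpy_to(&dst, &cursor, sizeof(cursor));	/* cursor header */
	dma_buf_map_incr(&dst, sizeof(cursor));			/* advance to pixel data */
	dma_buf_map_memcpy_to(&dst, user_map.vaddr, size);	/* 64x64 ARGB pixels */

This still assumes the user BO mapping is in system memory, exactly as the current code does when it dereferences user_map.vaddr directly.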
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 7b7acb910780..7d27891e87fa 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -48,7 +48,7 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
struct qxl_clip_rects *dev_clips;
int ret;
- ret = qxl_bo_kmap(clips_bo, &map);
+ ret = qxl_bo_vmap_locked(clips_bo, &map);
if (ret)
return NULL;
dev_clips = map.vaddr; /* TODO: Use mapping abstraction properly */
@@ -202,7 +202,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
if (ret)
goto out_release_backoff;
- ret = qxl_bo_kmap(bo, &surface_map);
+ ret = qxl_bo_vmap_locked(bo, &surface_map);
if (ret)
goto out_release_backoff;
surface_base = surface_map.vaddr; /* TODO: Use mapping abstraction properly */
@@ -210,7 +210,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
ret = qxl_image_init(qdev, release, dimage, surface_base,
left - dumb_shadow_offset,
top, width, height, depth, stride);
- qxl_bo_kunmap(bo);
+ qxl_bo_vunmap_locked(bo);
if (ret)
goto out_release_backoff;
@@ -247,7 +247,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
rects[i].top = clips_ptr->y1;
rects[i].bottom = clips_ptr->y2;
}
- qxl_bo_kunmap(clips_bo);
+ qxl_bo_vunmap_locked(clips_bo);
qxl_release_fence_buffer_objects(release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 83b54f0dad61..6dd57cfb2e7c 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -125,7 +125,7 @@ struct qxl_output {
#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
struct qxl_mman {
- struct ttm_bo_device bdev;
+ struct ttm_device bdev;
};
struct qxl_memslot {
@@ -214,6 +214,8 @@ struct qxl_device {
spinlock_t release_lock;
struct idr release_idr;
uint32_t release_seqno;
+ atomic_t release_count;
+ wait_queue_head_t release_event;
spinlock_t release_idr_lock;
struct mutex async_io_mutex;
unsigned int last_sent_io_cmd;
@@ -335,7 +337,7 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
/* qxl ttm */
int qxl_ttm_init(struct qxl_device *qdev);
void qxl_ttm_fini(struct qxl_device *qdev);
-int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem);
/* qxl image */
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index c04cd5a2553c..48a58ba1db96 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -59,7 +59,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
surf.stride = pitch;
surf.format = format;
r = qxl_gem_object_create_with_handle(qdev, file_priv,
- QXL_GEM_DOMAIN_SURFACE,
+ QXL_GEM_DOMAIN_CPU,
args->size, &surf, &qobj,
&handle);
if (r)
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 48e096285b4c..a08da0bd9098 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
/* At least align on page size */
if (alignment < PAGE_SIZE)
alignment = PAGE_SIZE;
- r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
+ r = qxl_bo_create(qdev, size, kernel, false, initial_domain, 0, surf, &qbo);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR(
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index 60ab7151b84d..ffff54e5fb31 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -186,7 +186,7 @@ qxl_image_init_helper(struct qxl_device *qdev,
}
}
}
- qxl_bo_kunmap(chunk_bo);
+ qxl_bo_vunmap_locked(chunk_bo);
image_bo = dimage->bo;
ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index ddf6588a2a38..d312322cacd1 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -87,6 +87,7 @@ int qxl_irq_init(struct qxl_device *qdev)
init_waitqueue_head(&qdev->display_event);
init_waitqueue_head(&qdev->cursor_event);
init_waitqueue_head(&qdev->io_cmd_event);
+ init_waitqueue_head(&qdev->release_event);
INIT_WORK(&qdev->client_monitors_config_work,
qxl_client_monitors_config_work_func);
atomic_set(&qdev->irq_received, 0);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 4a60a52ab62e..4dc5ad13f12c 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -286,11 +286,35 @@ vram_mapping_free:
void qxl_device_fini(struct qxl_device *qdev)
{
- qxl_bo_unref(&qdev->current_release_bo[0]);
- qxl_bo_unref(&qdev->current_release_bo[1]);
+ int cur_idx;
+
+ /* check if qxl_device_init() was successful (gc_work is initialized last) */
+ if (!qdev->gc_work.func)
+ return;
+
+ for (cur_idx = 0; cur_idx < 3; cur_idx++) {
+ if (!qdev->current_release_bo[cur_idx])
+ continue;
+ qxl_bo_unpin(qdev->current_release_bo[cur_idx]);
+ qxl_bo_unref(&qdev->current_release_bo[cur_idx]);
+ qdev->current_release_bo_offset[cur_idx] = 0;
+ qdev->current_release_bo[cur_idx] = NULL;
+ }
+
+ /*
+ * Ask host to release resources (+fill release ring),
+ * then wait for the release actually happening.
+ */
+ qxl_io_notify_oom(qdev);
+ wait_event_timeout(qdev->release_event,
+ atomic_read(&qdev->release_count) == 0,
+ HZ);
+ flush_work(&qdev->gc_work);
+ qxl_surf_evict(qdev);
+ qxl_vram_evict(qdev);
+
qxl_gem_fini(qdev);
qxl_bo_fini(qdev);
- flush_work(&qdev->gc_work);
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
qxl_ring_free(qdev->release_ring);
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index ceebc5881f68..6e26d70f2f07 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -29,6 +29,9 @@
#include "qxl_drv.h"
#include "qxl_object.h"
+static int __qxl_bo_pin(struct qxl_bo *bo);
+static void __qxl_bo_unpin(struct qxl_bo *bo);
+
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct qxl_bo *bo;
@@ -103,8 +106,8 @@ static const struct drm_gem_object_funcs qxl_object_funcs = {
.print_info = drm_gem_ttm_print_info,
};
-int qxl_bo_create(struct qxl_device *qdev,
- unsigned long size, bool kernel, bool pinned, u32 domain,
+int qxl_bo_create(struct qxl_device *qdev, unsigned long size,
+ bool kernel, bool pinned, u32 domain, u32 priority,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr)
{
@@ -137,9 +140,10 @@ int qxl_bo_create(struct qxl_device *qdev,
qxl_ttm_placement_from_domain(bo, domain);
+ bo->tbo.priority = priority;
r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, 0, &ctx, size,
- NULL, NULL, &qxl_ttm_bo_destroy);
+ &bo->placement, 0, &ctx, NULL, NULL,
+ &qxl_ttm_bo_destroy);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(qdev->ddev.dev,
@@ -154,10 +158,12 @@ int qxl_bo_create(struct qxl_device *qdev,
return 0;
}
-int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map)
+int qxl_bo_vmap_locked(struct qxl_bo *bo, struct dma_buf_map *map)
{
int r;
+ dma_resv_assert_held(bo->tbo.base.resv);
+
if (bo->kptr) {
bo->map_count++;
goto out;
@@ -178,6 +184,25 @@ out:
return 0;
}
+int qxl_bo_vmap(struct qxl_bo *bo, struct dma_buf_map *map)
+{
+ int r;
+
+ r = qxl_bo_reserve(bo);
+ if (r)
+ return r;
+
+ r = __qxl_bo_pin(bo);
+ if (r) {
+ qxl_bo_unreserve(bo);
+ return r;
+ }
+
+ r = qxl_bo_vmap_locked(bo, map);
+ qxl_bo_unreserve(bo);
+ return r;
+}
+
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, int page_offset)
{
@@ -202,7 +227,7 @@ fallback:
return rptr;
}
- ret = qxl_bo_kmap(bo, &bo_map);
+ ret = qxl_bo_vmap_locked(bo, &bo_map);
if (ret)
return NULL;
rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */
@@ -211,8 +236,10 @@ fallback:
return rptr;
}
-void qxl_bo_kunmap(struct qxl_bo *bo)
+void qxl_bo_vunmap_locked(struct qxl_bo *bo)
{
+ dma_resv_assert_held(bo->tbo.base.resv);
+
if (bo->kptr == NULL)
return;
bo->map_count--;
@@ -222,6 +249,20 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
ttm_bo_vunmap(&bo->tbo, &bo->map);
}
+int qxl_bo_vunmap(struct qxl_bo *bo)
+{
+ int r;
+
+ r = qxl_bo_reserve(bo);
+ if (r)
+ return r;
+
+ qxl_bo_vunmap_locked(bo);
+ __qxl_bo_unpin(bo);
+ qxl_bo_unreserve(bo);
+ return 0;
+}
+
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, void *pmap)
{
@@ -232,7 +273,7 @@ void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
io_mapping_unmap_atomic(pmap);
return;
fallback:
- qxl_bo_kunmap(bo);
+ qxl_bo_vunmap_locked(bo);
}
void qxl_bo_unref(struct qxl_bo **bo)
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index e60a8f88e226..ee9c29de4d3d 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -61,10 +61,13 @@ static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
extern int qxl_bo_create(struct qxl_device *qdev,
unsigned long size,
bool kernel, bool pinned, u32 domain,
+ u32 priority,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr);
-extern int qxl_bo_kmap(struct qxl_bo *bo, struct dma_buf_map *map);
-extern void qxl_bo_kunmap(struct qxl_bo *bo);
+int qxl_bo_vmap(struct qxl_bo *bo, struct dma_buf_map *map);
+int qxl_bo_vmap_locked(struct qxl_bo *bo, struct dma_buf_map *map);
+int qxl_bo_vunmap(struct qxl_bo *bo);
+void qxl_bo_vunmap_locked(struct qxl_bo *bo);
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
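The renamed interface separates the two locking cases: qxl_bo_vmap()/qxl_bo_vunmap() reserve, pin and map the BO internally, while the _locked variants assert that the caller already holds the reservation. A minimal usage sketch for the unlocked pair, assuming `bo` is a valid qxl_bo and with most error handling omitted:

	struct dma_buf_map map;
	int ret;

	ret = qxl_bo_vmap(bo, &map);	/* reserves, pins and maps the BO */
	if (ret)
		return ret;

	if (map.is_iomem)
		memset_io(map.vaddr_iomem, 0, PAGE_SIZE);
	else
		memset(map.vaddr, 0, PAGE_SIZE);

	qxl_bo_vunmap(bo);		/* unmaps and drops the pin again */

Callers that already sit inside a reservation, such as the draw and image paths above, keep using the _locked variants instead.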
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 4aa949799446..0628d1cc91fe 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -59,7 +59,7 @@ int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
struct qxl_bo *bo = gem_to_qxl_bo(obj);
int ret;
- ret = qxl_bo_kmap(bo, map);
+ ret = qxl_bo_vmap(bo, map);
if (ret < 0)
return ret;
@@ -71,7 +71,7 @@ void qxl_gem_prime_vunmap(struct drm_gem_object *obj,
{
struct qxl_bo *bo = gem_to_qxl_bo(obj);
- qxl_bo_kunmap(bo);
+ qxl_bo_vunmap(bo);
}
int qxl_gem_prime_mmap(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b372455e2729..f5845c96d414 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -58,56 +58,16 @@ static long qxl_fence_wait(struct dma_fence *fence, bool intr,
signed long timeout)
{
struct qxl_device *qdev;
- struct qxl_release *release;
- int count = 0, sc = 0;
- bool have_drawable_releases;
unsigned long cur, end = jiffies + timeout;
qdev = container_of(fence->lock, struct qxl_device, release_lock);
- release = container_of(fence, struct qxl_release, base);
- have_drawable_releases = release->type == QXL_RELEASE_DRAWABLE;
-
-retry:
- sc++;
-
- if (dma_fence_is_signaled(fence))
- goto signaled;
-
- qxl_io_notify_oom(qdev);
-
- for (count = 0; count < 11; count++) {
- if (!qxl_queue_garbage_collect(qdev, true))
- break;
-
- if (dma_fence_is_signaled(fence))
- goto signaled;
- }
-
- if (dma_fence_is_signaled(fence))
- goto signaled;
-
- if (have_drawable_releases || sc < 4) {
- if (sc > 2)
- /* back off */
- usleep_range(500, 1000);
- if (time_after(jiffies, end))
- return 0;
-
- if (have_drawable_releases && sc > 300) {
- DMA_FENCE_WARN(fence, "failed to wait on release %llu "
- "after spincount %d\n",
- fence->context & ~0xf0000000, sc);
- goto signaled;
- }
- goto retry;
- }
- /*
- * yeah, original sync_obj_wait gave up after 3 spins when
- * have_drawable_releases is not set.
- */
+ if (!wait_event_timeout(qdev->release_event,
+ (dma_fence_is_signaled(fence) ||
+ (qxl_io_notify_oom(qdev), 0)),
+ timeout))
+ return 0;
-signaled:
cur = jiffies;
if (time_after(cur, end))
return 0;
@@ -196,14 +156,16 @@ qxl_release_free(struct qxl_device *qdev,
qxl_release_free_list(release);
kfree(release);
}
+ atomic_dec(&qdev->release_count);
}
static int qxl_release_bo_alloc(struct qxl_device *qdev,
- struct qxl_bo **bo)
+ struct qxl_bo **bo,
+ u32 priority)
{
/* pin releases bo's they are too messy to evict */
return qxl_bo_create(qdev, PAGE_SIZE, false, true,
- QXL_GEM_DOMAIN_VRAM, NULL, bo);
+ QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
}
int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
@@ -326,13 +288,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
int ret = 0;
union qxl_release_info *info;
int cur_idx;
+ u32 priority;
- if (type == QXL_RELEASE_DRAWABLE)
+ if (type == QXL_RELEASE_DRAWABLE) {
cur_idx = 0;
- else if (type == QXL_RELEASE_SURFACE_CMD)
+ priority = 0;
+ } else if (type == QXL_RELEASE_SURFACE_CMD) {
cur_idx = 1;
- else if (type == QXL_RELEASE_CURSOR_CMD)
+ priority = 1;
+ } else if (type == QXL_RELEASE_CURSOR_CMD) {
cur_idx = 2;
+ priority = 1;
+ }
else {
DRM_ERROR("got illegal type: %d\n", type);
return -EINVAL;
@@ -344,6 +311,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
*rbo = NULL;
return idr_ret;
}
+ atomic_inc(&qdev->release_count);
mutex_lock(&qdev->release_mutex);
if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) {
@@ -352,7 +320,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
qdev->current_release_bo[cur_idx] = NULL;
}
if (!qdev->current_release_bo[cur_idx]) {
- ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx]);
+ ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority);
if (ret) {
mutex_unlock(&qdev->release_mutex);
if (free_bo) {
@@ -437,7 +405,7 @@ void qxl_release_unmap(struct qxl_device *qdev,
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
struct ttm_buffer_object *bo;
- struct ttm_bo_device *bdev;
+ struct ttm_device *bdev;
struct ttm_validate_buffer *entry;
struct qxl_device *qdev;
@@ -458,7 +426,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
@@ -467,7 +435,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
ww_acquire_fini(&release->ticket);
}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 33c09dc94f8b..b7f77eb685cb 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -36,7 +36,7 @@
#include "qxl_drv.h"
#include "qxl_object.h"
-static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
+static struct qxl_device *qxl_get_qdev(struct ttm_device *bdev)
{
struct qxl_mman *mman;
struct qxl_device *qdev;
@@ -69,7 +69,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
*placement = qbo->placement;
}
-int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct qxl_device *qdev = qxl_get_qdev(bdev);
@@ -98,8 +98,7 @@ int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
/*
* TTM backend functions.
*/
-static void qxl_ttm_backend_destroy(struct ttm_bo_device *bdev,
- struct ttm_tt *ttm)
+static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(ttm);
@@ -170,7 +169,7 @@ static void qxl_bo_delete_mem_notify(struct ttm_buffer_object *bo)
qxl_bo_move_notify(bo, false, NULL);
}
-static struct ttm_bo_driver qxl_bo_driver = {
+static struct ttm_device_funcs qxl_bo_driver = {
.ttm_tt_create = &qxl_ttm_tt_create,
.ttm_tt_destroy = &qxl_ttm_backend_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
@@ -193,10 +192,10 @@ int qxl_ttm_init(struct qxl_device *qdev)
int num_io_pages; /* != rom->num_io_pages, we include surface0 */
/* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
- qdev->ddev.anon_inode->i_mapping,
- qdev->ddev.vma_offset_manager,
- false, false);
+ r = ttm_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
+ qdev->ddev.anon_inode->i_mapping,
+ qdev->ddev.vma_offset_manager,
+ false, false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
@@ -227,7 +226,7 @@ void qxl_ttm_fini(struct qxl_device *qdev)
{
ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_VRAM);
ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_PRIV);
- ttm_bo_device_release(&qdev->mman.bdev);
+ ttm_device_fini(&qdev->mman.bdev);
DRM_INFO("qxl: ttm finalized\n");
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 1979ed3d6547..c94e429e75f9 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1157,7 +1157,6 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1267,8 +1266,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
#endif
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
@@ -1478,7 +1477,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
- struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1579,8 +1577,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
#endif
break;
default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(target_fb->format->format, &format_name));
+ DRM_ERROR("Unsupported screen format %p4cc\n",
+ &target_fb->format->format);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3effc8c71494..1aab2ccfed13 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -451,7 +451,7 @@ struct radeon_surface_reg {
* TTM.
*/
struct radeon_mman {
- struct ttm_bo_device bdev;
+ struct ttm_device bdev;
bool initialized;
#if defined(CONFIG_DEBUG_FS)
@@ -2824,7 +2824,7 @@ extern int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
uint32_t flags);
extern bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev, struct ttm_tt *ttm);
extern bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, struct ttm_tt *ttm);
-bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+bool radeon_ttm_tt_is_bound(struct ttm_device *bdev, struct ttm_tt *ttm);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
@@ -2834,7 +2834,7 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size
extern void radeon_program_register_sequence(struct radeon_device *rdev,
const u32 *registers,
const u32 array_size);
-struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev);
+struct radeon_device *radeon_get_rdev(struct ttm_device *bdev);
/* KMS */
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 9b81786782de..804f7a427be7 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -159,7 +159,6 @@ int radeon_bo_create(struct radeon_device *rdev,
struct radeon_bo *bo;
enum ttm_bo_type type;
unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
- size_t acc_size;
int r;
size = ALIGN(size, PAGE_SIZE);
@@ -173,9 +172,6 @@ int radeon_bo_create(struct radeon_device *rdev,
}
*bo_ptr = NULL;
- acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
- sizeof(struct radeon_bo));
-
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
@@ -230,8 +226,8 @@ int radeon_bo_create(struct radeon_device *rdev,
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, page_align, !kernel, acc_size,
- sg, resv, &radeon_ttm_bo_destroy);
+ &bo->placement, page_align, !kernel, sg, resv,
+ &radeon_ttm_bo_destroy);
up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
return r;
@@ -372,7 +368,7 @@ void radeon_bo_unpin(struct radeon_bo *bo)
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
- struct ttm_bo_device *bdev = &rdev->mman.bdev;
+ struct ttm_device *bdev = &rdev->mman.bdev;
struct ttm_resource_manager *man;
/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e8c66d10478f..5fc8bae401af 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -55,13 +55,11 @@
static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
-static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
- struct ttm_tt *ttm,
+static int radeon_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
struct ttm_resource *bo_mem);
-static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
- struct ttm_tt *ttm);
+static void radeon_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
-struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
+struct radeon_device *radeon_get_rdev(struct ttm_device *bdev)
{
struct radeon_mman *mman;
struct radeon_device *rdev;
@@ -280,7 +278,7 @@ out:
return 0;
}
-static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
+static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
struct radeon_device *rdev = radeon_get_rdev(bdev);
size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
@@ -347,7 +345,7 @@ struct radeon_ttm_tt {
};
/* prepare the sg table with the user pages */
-static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct radeon_device *rdev = radeon_get_rdev(bdev);
struct radeon_ttm_tt *gtt = (void *)ttm;
@@ -408,7 +406,7 @@ release_pages:
return r;
}
-static void radeon_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void radeon_ttm_tt_unpin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct radeon_device *rdev = radeon_get_rdev(bdev);
struct radeon_ttm_tt *gtt = (void *)ttm;
@@ -444,7 +442,7 @@ static bool radeon_ttm_backend_is_bound(struct ttm_tt *ttm)
return (gtt->bound);
}
-static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev,
+static int radeon_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem)
{
@@ -480,7 +478,7 @@ static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev,
return 0;
}
-static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void radeon_ttm_backend_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt = (void *)ttm;
struct radeon_device *rdev = radeon_get_rdev(bdev);
@@ -495,7 +493,7 @@ static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt
gtt->bound = false;
}
-static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void radeon_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt = (void *)ttm;
@@ -554,7 +552,7 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
return container_of(ttm, struct radeon_ttm_tt, ttm);
}
-static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
+static int radeon_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_operation_ctx *ctx)
{
@@ -580,7 +578,7 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx);
}
-static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct radeon_device *rdev = radeon_get_rdev(bdev);
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
@@ -613,7 +611,7 @@ int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
return 0;
}
-bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev,
+bool radeon_ttm_tt_is_bound(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
@@ -624,7 +622,7 @@ bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev,
return radeon_ttm_backend_is_bound(ttm);
}
-static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
+static int radeon_ttm_tt_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem)
{
@@ -642,7 +640,7 @@ static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
return radeon_ttm_backend_bind(bdev, ttm, bo_mem);
}
-static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
+static void radeon_ttm_tt_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
@@ -656,7 +654,7 @@ static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
radeon_ttm_backend_unbind(bdev, ttm);
}
-static void radeon_ttm_tt_destroy(struct ttm_bo_device *bdev,
+static void radeon_ttm_tt_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
@@ -700,7 +698,7 @@ radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo)
radeon_bo_move_notify(bo, false, NULL);
}
-static struct ttm_bo_driver radeon_bo_driver = {
+static struct ttm_device_funcs radeon_bo_driver = {
.ttm_tt_create = &radeon_ttm_tt_create,
.ttm_tt_populate = &radeon_ttm_tt_populate,
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
@@ -718,7 +716,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
int r;
/* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
+ r = ttm_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
rdev->ddev->anon_inode->i_mapping,
rdev->ddev->vma_offset_manager,
rdev->need_swiotlb,
@@ -788,7 +786,7 @@ void radeon_ttm_fini(struct radeon_device *rdev)
}
ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_VRAM);
ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_TT);
- ttm_bo_device_release(&rdev->mman.bdev);
+ ttm_device_fini(&rdev->mman.bdev);
radeon_gart_fini(rdev);
rdev->mman.initialized = false;
DRM_INFO("radeon: ttm finalized\n");
@@ -837,7 +835,7 @@ unlock_mclk:
return ret;
}
-static struct vm_operations_struct radeon_ttm_vm_ops = {
+static const struct vm_operations_struct radeon_ttm_vm_ops = {
.fault = radeon_ttm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index 02e5f11f38eb..862197be1e01 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -607,21 +607,26 @@ int __rcar_du_plane_atomic_check(struct drm_plane *plane,
}
static int rcar_du_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct rcar_du_plane_state *rstate = to_rcar_plane_state(state);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct rcar_du_plane_state *rstate = to_rcar_plane_state(new_plane_state);
- return __rcar_du_plane_atomic_check(plane, state, &rstate->format);
+ return __rcar_du_plane_atomic_check(plane, new_plane_state,
+ &rstate->format);
}
static void rcar_du_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
struct rcar_du_plane *rplane = to_rcar_plane(plane);
struct rcar_du_plane_state *old_rstate;
struct rcar_du_plane_state *new_rstate;
- if (!plane->state->visible)
+ if (!new_state->visible)
return;
rcar_du_plane_setup(rplane);
@@ -635,7 +640,7 @@ static void rcar_du_plane_atomic_update(struct drm_plane *plane,
* bit. We thus need to restart the group if the source changes.
*/
old_rstate = to_rcar_plane_state(old_state);
- new_rstate = to_rcar_plane_state(plane->state);
+ new_rstate = to_rcar_plane_state(new_state);
if ((old_rstate->source == RCAR_DU_PLANE_MEMORY) !=
(new_rstate->source == RCAR_DU_PLANE_MEMORY))
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 53221d8473c1..23e41c83c875 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -7,12 +7,13 @@
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_vblank.h>
@@ -236,7 +237,7 @@ static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
if (ret < 0)
return ret;
- return drm_gem_fb_prepare_fb(plane, state);
+ return drm_gem_plane_helper_prepare_fb(plane, state);
}
void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
@@ -265,20 +266,25 @@ static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane,
}
static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(new_plane_state);
- return __rcar_du_plane_atomic_check(plane, state, &rstate->format);
+ return __rcar_du_plane_atomic_check(plane, new_plane_state,
+ &rstate->format);
}
static void rcar_du_vsp_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
struct rcar_du_vsp_plane *rplane = to_rcar_vsp_plane(plane);
struct rcar_du_crtc *crtc = to_rcar_crtc(old_state->crtc);
- if (plane->state->visible)
+ if (new_state->visible)
rcar_du_vsp_plane_setup(rplane);
else if (old_state->crtc)
vsp1_du_atomic_update(rplane->vsp->vsp, crtc->vsp_pipe,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 8d15cabdcb02..81c70d7a0471 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -23,6 +23,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -778,11 +779,13 @@ static bool rockchip_mod_supported(struct drm_plane *plane,
}
static int vop_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct drm_crtc *crtc = state->crtc;
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_framebuffer *fb = new_plane_state->fb;
struct vop_win *vop_win = to_vop_win(plane);
const struct vop_win_data *win = vop_win->data;
int ret;
@@ -794,17 +797,18 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
if (!crtc || WARN_ON(!fb))
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
- ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
min_scale, max_scale,
true, true);
if (ret)
return ret;
- if (!state->visible)
+ if (!new_plane_state->visible)
return 0;
ret = vop_convert_format(fb->format->format);
@@ -815,12 +819,12 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
* Src.x1 can be odd when do clip, but yuv plane start point
* need align with 2 pixel.
*/
- if (fb->format->is_yuv && ((state->src.x1 >> 16) % 2)) {
+ if (fb->format->is_yuv && ((new_plane_state->src.x1 >> 16) % 2)) {
DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
return -EINVAL;
}
- if (fb->format->is_yuv && state->rotation & DRM_MODE_REFLECT_Y) {
+ if (fb->format->is_yuv && new_plane_state->rotation & DRM_MODE_REFLECT_Y) {
DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n");
return -EINVAL;
}
@@ -837,14 +841,16 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
if (ret < 0)
return ret;
- if (state->src.x1 || state->src.y1) {
- DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n", state->src.x1, state->src.y1, fb->offsets[0]);
+ if (new_plane_state->src.x1 || new_plane_state->src.y1) {
+ DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n",
+ new_plane_state->src.x1,
+ new_plane_state->src.y1, fb->offsets[0]);
return -EINVAL;
}
- if (state->rotation && state->rotation != DRM_MODE_ROTATE_0) {
+ if (new_plane_state->rotation && new_plane_state->rotation != DRM_MODE_ROTATE_0) {
DRM_ERROR("No rotation support in AFBC, rotation=%d\n",
- state->rotation);
+ new_plane_state->rotation);
return -EINVAL;
}
}
@@ -853,8 +859,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
}
static void vop_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct vop_win *vop_win = to_vop_win(plane);
struct vop *vop = to_vop(old_state->crtc);
@@ -869,20 +877,23 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
}
static void vop_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *state = plane->state;
- struct drm_crtc *crtc = state->crtc;
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_crtc *crtc = new_state->crtc;
struct vop_win *vop_win = to_vop_win(plane);
const struct vop_win_data *win = vop_win->data;
const struct vop_win_yuv2yuv_data *win_yuv2yuv = vop_win->yuv2yuv_data;
- struct vop *vop = to_vop(state->crtc);
- struct drm_framebuffer *fb = state->fb;
+ struct vop *vop = to_vop(new_state->crtc);
+ struct drm_framebuffer *fb = new_state->fb;
unsigned int actual_w, actual_h;
unsigned int dsp_stx, dsp_sty;
uint32_t act_info, dsp_info, dsp_st;
- struct drm_rect *src = &state->src;
- struct drm_rect *dest = &state->dst;
+ struct drm_rect *src = &new_state->src;
+ struct drm_rect *dest = &new_state->dst;
struct drm_gem_object *obj, *uv_obj;
struct rockchip_gem_object *rk_obj, *rk_uv_obj;
unsigned long offset;
@@ -903,8 +914,8 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
if (WARN_ON(!vop->is_enabled))
return;
- if (!state->visible) {
- vop_plane_atomic_disable(plane, old_state);
+ if (!new_state->visible) {
+ vop_plane_atomic_disable(plane, state);
return;
}
@@ -930,7 +941,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
* For y-mirroring we need to move address
* to the beginning of the last line.
*/
- if (state->rotation & DRM_MODE_REFLECT_Y)
+ if (new_state->rotation & DRM_MODE_REFLECT_Y)
dma_addr += (actual_h - 1) * fb->pitches[0];
format = vop_convert_format(fb->format->format);
@@ -952,9 +963,9 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
VOP_WIN_YUV2YUV_SET(vop, win_yuv2yuv, y2r_en, is_yuv);
VOP_WIN_SET(vop, win, y_mir_en,
- (state->rotation & DRM_MODE_REFLECT_Y) ? 1 : 0);
+ (new_state->rotation & DRM_MODE_REFLECT_Y) ? 1 : 0);
VOP_WIN_SET(vop, win, x_mir_en,
- (state->rotation & DRM_MODE_REFLECT_X) ? 1 : 0);
+ (new_state->rotation & DRM_MODE_REFLECT_X) ? 1 : 0);
if (is_yuv) {
int hsub = fb->format->hsub;
@@ -1021,8 +1032,10 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
}
static int vop_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct vop_win *vop_win = to_vop_win(plane);
const struct vop_win_data *win = vop_win->data;
int min_scale = win->phy->scl ? FRAC_16_16(1, 8) :
@@ -1031,7 +1044,7 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane,
DRM_PLANE_HELPER_NO_SCALING;
struct drm_crtc_state *crtc_state;
- if (plane != state->crtc->cursor)
+ if (plane != new_plane_state->crtc->cursor)
return -EINVAL;
if (!plane->state)
@@ -1040,9 +1053,9 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane,
if (!plane->state->fb)
return -EINVAL;
- if (state->state)
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
+ if (state)
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ new_plane_state->crtc);
else /* Special case for asynchronous cursor updates. */
crtc_state = plane->crtc->state;
@@ -1052,8 +1065,10 @@ static int vop_plane_atomic_async_check(struct drm_plane *plane,
}
static void vop_plane_atomic_async_update(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct vop *vop = to_vop(plane->state->crtc);
struct drm_framebuffer *old_fb = plane->state->fb;
@@ -1068,7 +1083,7 @@ static void vop_plane_atomic_async_update(struct drm_plane *plane,
swap(plane->state->fb, new_state->fb);
if (vop->is_enabled) {
- vop_plane_atomic_update(plane, plane->state);
+ vop_plane_atomic_update(plane, state);
spin_lock(&vop->reg_lock);
vop_cfg_done(vop);
spin_unlock(&vop->reg_lock);
@@ -1096,7 +1111,7 @@ static const struct drm_plane_helper_funcs plane_helper_funcs = {
.atomic_disable = vop_plane_atomic_disable,
.atomic_async_check = vop_plane_atomic_async_check,
.atomic_async_update = vop_plane_atomic_async_update,
- .prepare_fb = drm_gem_fb_prepare_fb,
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
};
static const struct drm_plane_funcs vop_plane_funcs = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 654bc52d9ff3..bd5ba10822c2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -725,7 +725,7 @@ static int rockchip_lvds_probe(struct platform_device *pdev)
static int rockchip_lvds_remove(struct platform_device *pdev)
{
- struct rockchip_lvds *lvds = dev_get_drvdata(&pdev->dev);
+ struct rockchip_lvds *lvds = platform_get_drvdata(pdev);
component_del(&pdev->dev, &rockchip_lvds_component_ops);
clk_unprepare(lvds->pclk);
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index c1ac3e4003c6..92d965b629c6 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -489,7 +489,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
bool first;
trace_drm_sched_job(sched_job, entity);
- atomic_inc(&entity->rq->sched->score);
+ atomic_inc(entity->rq->sched->score);
WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 92637b70c9bf..d82a7ebf6099 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -91,7 +91,7 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
if (!list_empty(&entity->list))
return;
spin_lock(&rq->lock);
- atomic_inc(&rq->sched->score);
+ atomic_inc(rq->sched->score);
list_add_tail(&entity->list, &rq->entities);
spin_unlock(&rq->lock);
}
@@ -110,7 +110,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
if (list_empty(&entity->list))
return;
spin_lock(&rq->lock);
- atomic_dec(&rq->sched->score);
+ atomic_dec(rq->sched->score);
list_del_init(&entity->list);
if (rq->current_entity == entity)
rq->current_entity = NULL;
@@ -173,7 +173,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job)
struct drm_gpu_scheduler *sched = s_fence->sched;
atomic_dec(&sched->hw_rq_count);
- atomic_dec(&sched->score);
+ atomic_dec(sched->score);
trace_drm_sched_process_job(s_fence);
@@ -527,7 +527,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
EXPORT_SYMBOL(drm_sched_start);
/**
- * drm_sched_resubmit_jobs - helper to relunch job from pending ring list
+ * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
*
* @sched: scheduler instance
*
@@ -561,8 +561,6 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
} else {
s_job->s_fence->parent = fence;
}
-
-
}
}
EXPORT_SYMBOL(drm_sched_resubmit_jobs);
@@ -734,7 +732,7 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
continue;
}
- num_score = atomic_read(&sched->score);
+ num_score = atomic_read(sched->score);
if (num_score < min_score) {
min_score = num_score;
picked_sched = sched;
@@ -844,16 +842,15 @@ static int drm_sched_main(void *param)
* @hw_submission: number of hw submissions that can be in flight
* @hang_limit: number of times to allow a job to hang before dropping it
* @timeout: timeout value in jiffies for the scheduler
+ * @score: optional score atomic shared with other schedulers
* @name: name used for debugging
*
* Return 0 on success, otherwise error code.
*/
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
- unsigned hw_submission,
- unsigned hang_limit,
- long timeout,
- const char *name)
+ unsigned hw_submission, unsigned hang_limit, long timeout,
+ atomic_t *score, const char *name)
{
int i, ret;
sched->ops = ops;
@@ -861,6 +858,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
sched->name = name;
sched->timeout = timeout;
sched->hang_limit = hang_limit;
+ sched->score = score ? score : &sched->_score;
for (i = DRM_SCHED_PRIORITY_MIN; i < DRM_SCHED_PRIORITY_COUNT; i++)
drm_sched_rq_init(sched, &sched->sched_rq[i]);
@@ -870,7 +868,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
- atomic_set(&sched->score, 0);
+ atomic_set(&sched->_score, 0);
atomic64_set(&sched->job_id_count, 0);
/* Each scheduler will run on a seperate kernel thread */
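With the new signature, drm_sched_init() takes an optional score atomic that several schedulers can share, so work queued on one ring is visible when drm_sched_pick_best() compares candidates; passing NULL keeps the embedded _score and the previous per-scheduler behaviour. A hedged sketch of the new call, where my_sched_ops and the submission/hang-limit/timeout values are illustrative only:

	static atomic_t shared_score;
	struct drm_gpu_scheduler ring_sched[2];
	int i, ret;

	for (i = 0; i < 2; i++) {
		/* both rings account their load into the same score */
		ret = drm_sched_init(&ring_sched[i], &my_sched_ops,
				     16, 2, msecs_to_jiffies(10000),
				     &shared_score, i ? "ring1" : "ring0");
		if (ret)
			return ret;
	}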
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 7476301d7142..1d6051b4f6fe 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -181,12 +181,14 @@ static void sti_cursor_init(struct sti_cursor *cursor)
}
static int sti_cursor_atomic_check(struct drm_plane *drm_plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_cursor *cursor = to_sti_cursor(plane);
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc_state *crtc_state;
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
@@ -196,15 +198,17 @@ static int sti_cursor_atomic_check(struct drm_plane *drm_plane,
if (!crtc || !fb)
return 0;
- crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
mode = &crtc_state->mode;
- dst_x = state->crtc_x;
- dst_y = state->crtc_y;
- dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
- dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+ dst_x = new_plane_state->crtc_x;
+ dst_y = new_plane_state->crtc_y;
+ dst_w = clamp_val(new_plane_state->crtc_w, 0,
+ mode->crtc_hdisplay - dst_x);
+ dst_h = clamp_val(new_plane_state->crtc_h, 0,
+ mode->crtc_vdisplay - dst_y);
/* src_x are in 16.16 format */
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
+ src_w = new_plane_state->src_w >> 16;
+ src_h = new_plane_state->src_h >> 16;
if (src_w < STI_CURS_MIN_SIZE ||
src_h < STI_CURS_MIN_SIZE ||
@@ -252,13 +256,14 @@ static int sti_cursor_atomic_check(struct drm_plane *drm_plane,
}
static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
- struct drm_plane_state *oldstate)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *state = drm_plane->state;
+ struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state,
+ drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_cursor *cursor = to_sti_cursor(plane);
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = newstate->crtc;
+ struct drm_framebuffer *fb = newstate->fb;
struct drm_display_mode *mode;
int dst_x, dst_y;
struct drm_gem_cma_object *cma_obj;
@@ -269,8 +274,8 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
return;
mode = &crtc->mode;
- dst_x = state->crtc_x;
- dst_y = state->crtc_y;
+ dst_x = newstate->crtc_x;
+ dst_y = newstate->crtc_y;
cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
@@ -306,8 +311,10 @@ static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
}
static void sti_cursor_atomic_disable(struct drm_plane *drm_plane,
- struct drm_plane_state *oldstate)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
+ drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
if (!oldstate->crtc) {
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 2f4a34f14d33..d1a35d97bc45 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -615,12 +615,14 @@ static int sti_gdp_get_dst(struct device *dev, int dst, int src)
}
static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_gdp *gdp = to_sti_gdp(plane);
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc_state *crtc_state;
struct sti_mixer *mixer;
struct drm_display_mode *mode;
@@ -633,17 +635,19 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
return 0;
mixer = to_sti_mixer(crtc);
- crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
mode = &crtc_state->mode;
- dst_x = state->crtc_x;
- dst_y = state->crtc_y;
- dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
- dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
+ dst_x = new_plane_state->crtc_x;
+ dst_y = new_plane_state->crtc_y;
+ dst_w = clamp_val(new_plane_state->crtc_w, 0, mode->hdisplay - dst_x);
+ dst_h = clamp_val(new_plane_state->crtc_h, 0, mode->vdisplay - dst_y);
/* src_x are in 16.16 format */
- src_x = state->src_x >> 16;
- src_y = state->src_y >> 16;
- src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);
- src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);
+ src_x = new_plane_state->src_x >> 16;
+ src_y = new_plane_state->src_y >> 16;
+ src_w = clamp_val(new_plane_state->src_w >> 16, 0,
+ GAM_GDP_SIZE_MAX_WIDTH);
+ src_h = clamp_val(new_plane_state->src_h >> 16, 0,
+ GAM_GDP_SIZE_MAX_HEIGHT);
format = sti_gdp_fourcc2format(fb->format->format);
if (format == -1) {
@@ -695,13 +699,16 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
}
static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
- struct drm_plane_state *oldstate)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *state = drm_plane->state;
+ struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
+ drm_plane);
+ struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state,
+ drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_gdp *gdp = to_sti_gdp(plane);
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = newstate->crtc;
+ struct drm_framebuffer *fb = newstate->fb;
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
@@ -718,15 +725,15 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
if (!crtc || !fb)
return;
- if ((oldstate->fb == state->fb) &&
- (oldstate->crtc_x == state->crtc_x) &&
- (oldstate->crtc_y == state->crtc_y) &&
- (oldstate->crtc_w == state->crtc_w) &&
- (oldstate->crtc_h == state->crtc_h) &&
- (oldstate->src_x == state->src_x) &&
- (oldstate->src_y == state->src_y) &&
- (oldstate->src_w == state->src_w) &&
- (oldstate->src_h == state->src_h)) {
+ if ((oldstate->fb == newstate->fb) &&
+ (oldstate->crtc_x == newstate->crtc_x) &&
+ (oldstate->crtc_y == newstate->crtc_y) &&
+ (oldstate->crtc_w == newstate->crtc_w) &&
+ (oldstate->crtc_h == newstate->crtc_h) &&
+ (oldstate->src_x == newstate->src_x) &&
+ (oldstate->src_y == newstate->src_y) &&
+ (oldstate->src_w == newstate->src_w) &&
+ (oldstate->src_h == newstate->src_h)) {
/* No change since last update, do not post cmd */
DRM_DEBUG_DRIVER("No change, not posting cmd\n");
plane->status = STI_PLANE_UPDATED;
@@ -744,15 +751,15 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
}
mode = &crtc->mode;
- dst_x = state->crtc_x;
- dst_y = state->crtc_y;
- dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
- dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
+ dst_x = newstate->crtc_x;
+ dst_y = newstate->crtc_y;
+ dst_w = clamp_val(newstate->crtc_w, 0, mode->hdisplay - dst_x);
+ dst_h = clamp_val(newstate->crtc_h, 0, mode->vdisplay - dst_y);
/* src_x are in 16.16 format */
- src_x = state->src_x >> 16;
- src_y = state->src_y >> 16;
- src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);
- src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);
+ src_x = newstate->src_x >> 16;
+ src_y = newstate->src_y >> 16;
+ src_w = clamp_val(newstate->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);
+ src_h = clamp_val(newstate->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);
list = sti_gdp_get_free_nodes(gdp);
top_field = list->top_field;
@@ -860,8 +867,10 @@ end:
}
static void sti_gdp_atomic_disable(struct drm_plane *drm_plane,
- struct drm_plane_state *oldstate)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
+ drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
if (!oldstate->crtc) {
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 62f824cd5f21..edbb99f53de1 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -1017,12 +1017,14 @@ out:
}
static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = new_plane_state->crtc;
+ struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc_state *crtc_state;
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
@@ -1032,17 +1034,17 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
if (!crtc || !fb)
return 0;
- crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
mode = &crtc_state->mode;
- dst_x = state->crtc_x;
- dst_y = state->crtc_y;
- dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
- dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
+ dst_x = new_plane_state->crtc_x;
+ dst_y = new_plane_state->crtc_y;
+ dst_w = clamp_val(new_plane_state->crtc_w, 0, mode->hdisplay - dst_x);
+ dst_h = clamp_val(new_plane_state->crtc_h, 0, mode->vdisplay - dst_y);
/* src_x are in 16.16 format */
- src_x = state->src_x >> 16;
- src_y = state->src_y >> 16;
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
+ src_x = new_plane_state->src_x >> 16;
+ src_y = new_plane_state->src_y >> 16;
+ src_w = new_plane_state->src_w >> 16;
+ src_h = new_plane_state->src_h >> 16;
if (mode->clock && !sti_hqvdp_check_hw_scaling(hqvdp, mode,
src_w, src_h,
@@ -1107,13 +1109,16 @@ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
}
static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
- struct drm_plane_state *oldstate)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *state = drm_plane->state;
+ struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
+ drm_plane);
+ struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state,
+ drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
- struct drm_crtc *crtc = state->crtc;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_crtc *crtc = newstate->crtc;
+ struct drm_framebuffer *fb = newstate->fb;
struct drm_display_mode *mode;
int dst_x, dst_y, dst_w, dst_h;
int src_x, src_y, src_w, src_h;
@@ -1125,15 +1130,15 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
if (!crtc || !fb)
return;
- if ((oldstate->fb == state->fb) &&
- (oldstate->crtc_x == state->crtc_x) &&
- (oldstate->crtc_y == state->crtc_y) &&
- (oldstate->crtc_w == state->crtc_w) &&
- (oldstate->crtc_h == state->crtc_h) &&
- (oldstate->src_x == state->src_x) &&
- (oldstate->src_y == state->src_y) &&
- (oldstate->src_w == state->src_w) &&
- (oldstate->src_h == state->src_h)) {
+ if ((oldstate->fb == newstate->fb) &&
+ (oldstate->crtc_x == newstate->crtc_x) &&
+ (oldstate->crtc_y == newstate->crtc_y) &&
+ (oldstate->crtc_w == newstate->crtc_w) &&
+ (oldstate->crtc_h == newstate->crtc_h) &&
+ (oldstate->src_x == newstate->src_x) &&
+ (oldstate->src_y == newstate->src_y) &&
+ (oldstate->src_w == newstate->src_w) &&
+ (oldstate->src_h == newstate->src_h)) {
/* No change since last update, do not post cmd */
DRM_DEBUG_DRIVER("No change, not posting cmd\n");
plane->status = STI_PLANE_UPDATED;
@@ -1141,15 +1146,15 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
}
mode = &crtc->mode;
- dst_x = state->crtc_x;
- dst_y = state->crtc_y;
- dst_w = clamp_val(state->crtc_w, 0, mode->hdisplay - dst_x);
- dst_h = clamp_val(state->crtc_h, 0, mode->vdisplay - dst_y);
+ dst_x = newstate->crtc_x;
+ dst_y = newstate->crtc_y;
+ dst_w = clamp_val(newstate->crtc_w, 0, mode->hdisplay - dst_x);
+ dst_h = clamp_val(newstate->crtc_h, 0, mode->vdisplay - dst_y);
/* src_x are in 16.16 format */
- src_x = state->src_x >> 16;
- src_y = state->src_y >> 16;
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
+ src_x = newstate->src_x >> 16;
+ src_y = newstate->src_y >> 16;
+ src_w = newstate->src_w >> 16;
+ src_h = newstate->src_h >> 16;
cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
if (cmd_offset == -1) {
@@ -1238,8 +1243,10 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
}
static void sti_hqvdp_atomic_disable(struct drm_plane *drm_plane,
- struct drm_plane_state *oldstate)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
+ drm_plane);
struct sti_plane *plane = to_sti_plane(drm_plane);
if (!oldstate->crtc) {
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 7812094f93d6..b5117fccf355 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -26,8 +26,8 @@
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -525,13 +525,42 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_device *ddev = crtc->dev;
+ struct drm_connector_list_iter iter;
+ struct drm_connector *connector = NULL;
+ struct drm_encoder *encoder = NULL;
+ struct drm_bridge *bridge = NULL;
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
struct videomode vm;
u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
u32 total_width, total_height;
+ u32 bus_flags = 0;
u32 val;
int ret;
+ /* get encoder from crtc */
+ drm_for_each_encoder(encoder, ddev)
+ if (encoder->crtc == crtc)
+ break;
+
+ if (encoder) {
+ /* get bridge from encoder */
+ list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
+ if (bridge->encoder == encoder)
+ break;
+
+ /* Get the connector from encoder */
+ drm_connector_list_iter_begin(ddev, &iter);
+ drm_for_each_connector_iter(connector, &iter)
+ if (connector->encoder == encoder)
+ break;
+ drm_connector_list_iter_end(&iter);
+ }
+
+ if (bridge && bridge->timings)
+ bus_flags = bridge->timings->input_bus_flags;
+ else if (connector)
+ bus_flags = connector->display_info.bus_flags;
+
if (!pm_runtime_active(ddev->dev)) {
ret = pm_runtime_get_sync(ddev->dev);
if (ret) {
@@ -567,10 +596,10 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (vm.flags & DISPLAY_FLAGS_VSYNC_HIGH)
val |= GCR_VSPOL;
- if (vm.flags & DISPLAY_FLAGS_DE_LOW)
+ if (bus_flags & DRM_BUS_FLAG_DE_LOW)
val |= GCR_DEPOL;
- if (vm.flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
val |= GCR_PCPOL;
reg_update_bits(ldev->regs, LTDC_GCR,
@@ -720,9 +749,11 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
*/
static int ltdc_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct drm_framebuffer *fb = state->fb;
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_framebuffer *fb = new_plane_state->fb;
u32 src_w, src_h;
DRM_DEBUG_DRIVER("\n");
@@ -731,11 +762,11 @@ static int ltdc_plane_atomic_check(struct drm_plane *plane,
return 0;
/* convert src_ from 16:16 format */
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
+ src_w = new_plane_state->src_w >> 16;
+ src_h = new_plane_state->src_h >> 16;
/* Reject scaling */
- if (src_w != state->crtc_w || src_h != state->crtc_h) {
+ if (src_w != new_plane_state->crtc_w || src_h != new_plane_state->crtc_h) {
DRM_ERROR("Scaling is not supported");
return -EINVAL;
}
@@ -744,36 +775,37 @@ static int ltdc_plane_atomic_check(struct drm_plane *plane,
}
static void ltdc_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *oldstate)
+ struct drm_atomic_state *state)
{
struct ltdc_device *ldev = plane_to_ltdc(plane);
- struct drm_plane_state *state = plane->state;
- struct drm_framebuffer *fb = state->fb;
+ struct drm_plane_state *newstate = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_framebuffer *fb = newstate->fb;
u32 lofs = plane->index * LAY_OFS;
- u32 x0 = state->crtc_x;
- u32 x1 = state->crtc_x + state->crtc_w - 1;
- u32 y0 = state->crtc_y;
- u32 y1 = state->crtc_y + state->crtc_h - 1;
+ u32 x0 = newstate->crtc_x;
+ u32 x1 = newstate->crtc_x + newstate->crtc_w - 1;
+ u32 y0 = newstate->crtc_y;
+ u32 y1 = newstate->crtc_y + newstate->crtc_h - 1;
u32 src_x, src_y, src_w, src_h;
u32 val, pitch_in_bytes, line_length, paddr, ahbp, avbp, bpcr;
enum ltdc_pix_fmt pf;
- if (!state->crtc || !fb) {
+ if (!newstate->crtc || !fb) {
DRM_DEBUG_DRIVER("fb or crtc NULL");
return;
}
/* convert src_ from 16:16 format */
- src_x = state->src_x >> 16;
- src_y = state->src_y >> 16;
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
+ src_x = newstate->src_x >> 16;
+ src_y = newstate->src_y >> 16;
+ src_w = newstate->src_w >> 16;
+ src_h = newstate->src_h >> 16;
DRM_DEBUG_DRIVER("plane:%d fb:%d (%dx%d)@(%d,%d) -> (%dx%d)@(%d,%d)\n",
plane->base.id, fb->base.id,
src_w, src_h, src_x, src_y,
- state->crtc_w, state->crtc_h,
- state->crtc_x, state->crtc_y);
+ newstate->crtc_w, newstate->crtc_h,
+ newstate->crtc_x, newstate->crtc_y);
bpcr = reg_read(ldev->regs, LTDC_BPCR);
ahbp = (bpcr & BPCR_AHBP) >> 16;
@@ -832,7 +864,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
reg_update_bits(ldev->regs, LTDC_L1CFBLNR + lofs, LXCFBLNR_CFBLN, val);
/* Sets the FB address */
- paddr = (u32)drm_fb_cma_get_gem_addr(fb, state, 0);
+ paddr = (u32)drm_fb_cma_get_gem_addr(fb, newstate, 0);
DRM_DEBUG_DRIVER("fb: phys 0x%08x", paddr);
reg_write(ldev->regs, LTDC_L1CFBAR + lofs, paddr);
@@ -858,8 +890,10 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
}
static void ltdc_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *oldstate)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *oldstate = drm_atomic_get_old_plane_state(state,
+ plane);
struct ltdc_device *ldev = plane_to_ltdc(plane);
u32 lofs = plane->index * LAY_OFS;
@@ -911,7 +945,7 @@ static const struct drm_plane_funcs ltdc_plane_funcs = {
};
static const struct drm_plane_helper_funcs ltdc_plane_helper_funcs = {
- .prepare_fb = drm_gem_fb_prepare_fb,
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
.atomic_check = ltdc_plane_atomic_check,
.atomic_update = ltdc_plane_atomic_update,
.atomic_disable = ltdc_plane_atomic_disable,
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 522e51a404cc..bf8cfefa0365 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -510,7 +510,6 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
struct sun4i_layer_state *layer_state =
state_to_sun4i_layer_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
- struct drm_format_name_buf format_name;
if (!sun4i_backend_plane_is_supported(plane_state,
&layer_state->uses_frontend))
@@ -527,9 +526,8 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
}
}
- DRM_DEBUG_DRIVER("Plane FB format is %s\n",
- drm_get_format_name(fb->format->format,
- &format_name));
+ DRM_DEBUG_DRIVER("Plane FB format is %p4cc\n",
+ &fb->format->format);
if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
num_alpha_planes++;
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index acfbfd4463a1..11771bdd6e7c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -6,8 +6,9 @@
* Maxime Ripard <maxime.ripard@free-electrons.com>
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "sun4i_backend.h"
@@ -63,8 +64,10 @@ static void sun4i_backend_layer_destroy_state(struct drm_plane *plane,
}
static void sun4i_backend_layer_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct sun4i_layer_state *layer_state = state_to_sun4i_layer_state(old_state);
struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
struct sun4i_backend *backend = layer->backend;
@@ -81,9 +84,11 @@ static void sun4i_backend_layer_atomic_disable(struct drm_plane *plane,
}
static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct sun4i_layer_state *layer_state = state_to_sun4i_layer_state(plane->state);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct sun4i_layer_state *layer_state = state_to_sun4i_layer_state(new_state);
struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
struct sun4i_backend *backend = layer->backend;
struct sun4i_frontend *frontend = backend->frontend;
@@ -122,7 +127,7 @@ static bool sun4i_layer_format_mod_supported(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs sun4i_backend_layer_helper_funcs = {
- .prepare_fb = drm_gem_fb_prepare_fb,
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
.atomic_disable = sun4i_backend_layer_atomic_disable,
.atomic_update = sun4i_backend_layer_atomic_update,
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index 816ad4ce8996..0db164a774a1 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -14,8 +14,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -72,6 +72,27 @@ static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
}
}
+static void sun8i_ui_layer_update_alpha(struct sun8i_mixer *mixer, int channel,
+ int overlay, struct drm_plane *plane)
+{
+ u32 mask, val, ch_base;
+
+ ch_base = sun8i_channel_base(mixer, channel);
+
+ mask = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_MASK |
+ SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MASK;
+
+ val = SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA(plane->state->alpha >> 8);
+
+ val |= (plane->state->alpha == DRM_BLEND_ALPHA_OPAQUE) ?
+ SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_PIXEL :
+ SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_COMBINED;
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
+ mask, val);
+}
+
static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane,
unsigned int zpos)
@@ -236,17 +257,20 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
}
static int sun8i_ui_layer_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
- struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
int min_scale, max_scale;
if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
@@ -258,14 +282,17 @@ static int sun8i_ui_layer_atomic_check(struct drm_plane *plane,
max_scale = SUN8I_UI_SCALER_SCALE_MAX;
}
- return drm_atomic_helper_check_plane_state(state, crtc_state,
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
min_scale, max_scale,
true, true);
}
static void sun8i_ui_layer_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
@@ -275,14 +302,18 @@ static void sun8i_ui_layer_atomic_disable(struct drm_plane *plane,
}
static void sun8i_ui_layer_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct sun8i_ui_layer *layer = plane_to_sun8i_ui_layer(plane);
- unsigned int zpos = plane->state->normalized_zpos;
+ unsigned int zpos = new_state->normalized_zpos;
unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
- if (!plane->state->visible) {
+ if (!new_state->visible) {
sun8i_ui_layer_enable(mixer, layer->channel,
layer->overlay, false, 0, old_zpos);
return;
@@ -290,6 +321,8 @@ static void sun8i_ui_layer_atomic_update(struct drm_plane *plane,
sun8i_ui_layer_update_coord(mixer, layer->channel,
layer->overlay, plane, zpos);
+ sun8i_ui_layer_update_alpha(mixer, layer->channel,
+ layer->overlay, plane);
sun8i_ui_layer_update_formats(mixer, layer->channel,
layer->overlay, plane);
sun8i_ui_layer_update_buffer(mixer, layer->channel,
@@ -299,7 +332,7 @@ static void sun8i_ui_layer_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs sun8i_ui_layer_helper_funcs = {
- .prepare_fb = drm_gem_fb_prepare_fb,
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
.atomic_check = sun8i_ui_layer_atomic_check,
.atomic_disable = sun8i_ui_layer_atomic_disable,
.atomic_update = sun8i_ui_layer_atomic_update,
@@ -367,6 +400,12 @@ struct sun8i_ui_layer *sun8i_ui_layer_init_one(struct drm_device *drm,
plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
+ ret = drm_plane_create_alpha_property(&layer->plane);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't add alpha property\n");
+ return ERR_PTR(ret);
+ }
+
ret = drm_plane_create_zpos_property(&layer->plane, channel,
0, plane_cnt - 1);
if (ret) {
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.h b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
index f4ab1cf6cded..e3e32ee1178d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.h
@@ -40,6 +40,11 @@
#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK GENMASK(12, 8)
#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET 8
#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24)
+#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA(x) ((x) << 24)
+
+#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_PIXEL ((0) << 1)
+#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_LAYER ((1) << 1)
+#define SUN8I_MIXER_CHAN_UI_LAYER_ATTR_ALPHA_MODE_COMBINED ((2) << 1)
struct sun8i_mixer;
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 8cc294a9969d..46420780db59 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -7,8 +7,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -66,6 +66,36 @@ static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
}
}
+static void sun8i_vi_layer_update_alpha(struct sun8i_mixer *mixer, int channel,
+ int overlay, struct drm_plane *plane)
+{
+ u32 mask, val, ch_base;
+
+ ch_base = sun8i_channel_base(mixer, channel);
+
+ if (mixer->cfg->is_de3) {
+ mask = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK |
+ SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_MASK;
+ val = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA
+ (plane->state->alpha >> 8);
+
+ val |= (plane->state->alpha == DRM_BLEND_ALPHA_OPAQUE) ?
+ SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_PIXEL :
+ SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_COMBINED;
+
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base,
+ overlay),
+ mask, val);
+ } else if (mixer->cfg->vi_num == 1) {
+ regmap_update_bits(mixer->engine.regs,
+ SUN8I_MIXER_FCC_GLOBAL_ALPHA_REG,
+ SUN8I_MIXER_FCC_GLOBAL_ALPHA_MASK,
+ SUN8I_MIXER_FCC_GLOBAL_ALPHA
+ (plane->state->alpha >> 8));
+ }
+}
+
static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane,
unsigned int zpos)
@@ -268,14 +298,6 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE, val);
- /* It seems that YUV formats use global alpha setting. */
- if (mixer->cfg->is_de3)
- regmap_update_bits(mixer->engine.regs,
- SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base,
- overlay),
- SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK,
- SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(0xff));
-
return 0;
}
@@ -339,17 +361,20 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
}
static int sun8i_vi_layer_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
- struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
int min_scale, max_scale;
if (!crtc)
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
@@ -361,14 +386,17 @@ static int sun8i_vi_layer_atomic_check(struct drm_plane *plane,
max_scale = SUN8I_VI_SCALER_SCALE_MAX;
}
- return drm_atomic_helper_check_plane_state(state, crtc_state,
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
min_scale, max_scale,
true, true);
}
static void sun8i_vi_layer_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
@@ -378,14 +406,18 @@ static void sun8i_vi_layer_atomic_disable(struct drm_plane *plane,
}
static void sun8i_vi_layer_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct sun8i_vi_layer *layer = plane_to_sun8i_vi_layer(plane);
- unsigned int zpos = plane->state->normalized_zpos;
+ unsigned int zpos = new_state->normalized_zpos;
unsigned int old_zpos = old_state->normalized_zpos;
struct sun8i_mixer *mixer = layer->mixer;
- if (!plane->state->visible) {
+ if (!new_state->visible) {
sun8i_vi_layer_enable(mixer, layer->channel,
layer->overlay, false, 0, old_zpos);
return;
@@ -393,6 +425,8 @@ static void sun8i_vi_layer_atomic_update(struct drm_plane *plane,
sun8i_vi_layer_update_coord(mixer, layer->channel,
layer->overlay, plane, zpos);
+ sun8i_vi_layer_update_alpha(mixer, layer->channel,
+ layer->overlay, plane);
sun8i_vi_layer_update_formats(mixer, layer->channel,
layer->overlay, plane);
sun8i_vi_layer_update_buffer(mixer, layer->channel,
@@ -402,7 +436,7 @@ static void sun8i_vi_layer_atomic_update(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs sun8i_vi_layer_helper_funcs = {
- .prepare_fb = drm_gem_fb_prepare_fb,
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
.atomic_check = sun8i_vi_layer_atomic_check,
.atomic_disable = sun8i_vi_layer_atomic_disable,
.atomic_update = sun8i_vi_layer_atomic_update,
@@ -534,6 +568,14 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
+ if (mixer->cfg->vi_num == 1 || mixer->cfg->is_de3) {
+ ret = drm_plane_create_alpha_property(&layer->plane);
+ if (ret) {
+ dev_err(drm->dev, "Couldn't add alpha property\n");
+ return ERR_PTR(ret);
+ }
+ }
+
ret = drm_plane_create_zpos_property(&layer->plane, index,
0, plane_cnt - 1);
if (ret) {
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
index eaa6076f5dbc..48c399e1c86d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
@@ -29,14 +29,25 @@
#define SUN8I_MIXER_CHAN_VI_VDS_UV(base) \
((base) + 0xfc)
+#define SUN8I_MIXER_FCC_GLOBAL_ALPHA_REG \
+ (0xAA000 + 0x90)
+
+#define SUN8I_MIXER_FCC_GLOBAL_ALPHA(x) ((x) << 24)
+#define SUN8I_MIXER_FCC_GLOBAL_ALPHA_MASK GENMASK(31, 24)
+
#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN BIT(0)
/* RGB mode should be set for RGB formats and cleared for YCbCr */
#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE BIT(15)
#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET 8
#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK GENMASK(12, 8)
+#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_MASK GENMASK(2, 1)
#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24)
#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(x) ((x) << 24)
+#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_PIXEL ((0) << 1)
+#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_LAYER ((1) << 1)
+#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_COMBINED ((2) << 1)
+
#define SUN8I_MIXER_CHAN_VI_DS_N(x) ((x) << 16)
#define SUN8I_MIXER_CHAN_VI_DS_M(x) ((x) << 0)
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 0ae3a025efe9..da6afe7f0c7d 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -604,23 +604,25 @@ static const u64 tegra124_modifiers[] = {
};
static int tegra_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct tegra_plane_state *plane_state = to_tegra_plane_state(state);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
unsigned int supported_rotation = DRM_MODE_ROTATE_0 |
DRM_MODE_REFLECT_X |
DRM_MODE_REFLECT_Y;
- unsigned int rotation = state->rotation;
+ unsigned int rotation = new_plane_state->rotation;
struct tegra_bo_tiling *tiling = &plane_state->tiling;
struct tegra_plane *tegra = to_tegra_plane(plane);
- struct tegra_dc *dc = to_tegra_dc(state->crtc);
+ struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
int err;
/* no need for further checks if the plane is being disabled */
- if (!state->crtc)
+ if (!new_plane_state->crtc)
return 0;
- err = tegra_plane_format(state->fb->format->format,
+ err = tegra_plane_format(new_plane_state->fb->format->format,
&plane_state->format,
&plane_state->swap);
if (err < 0)
@@ -638,7 +640,7 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
return err;
}
- err = tegra_fb_get_tiling(state->fb, tiling);
+ err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
if (err < 0)
return err;
@@ -654,7 +656,7 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
* property in order to achieve the same result. The legacy BO flag
* duplicates the DRM rotation property when both are set.
*/
- if (tegra_fb_is_bottom_up(state->fb))
+ if (tegra_fb_is_bottom_up(new_plane_state->fb))
rotation |= DRM_MODE_REFLECT_Y;
rotation = drm_rotation_simplify(rotation, supported_rotation);
@@ -674,14 +676,14 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
* error out if the user tries to display a framebuffer with such a
* configuration.
*/
- if (state->fb->format->num_planes > 2) {
- if (state->fb->pitches[2] != state->fb->pitches[1]) {
+ if (new_plane_state->fb->format->num_planes > 2) {
+ if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
DRM_ERROR("unsupported UV-plane configuration\n");
return -EINVAL;
}
}
- err = tegra_plane_state_add(tegra, state);
+ err = tegra_plane_state_add(tegra, new_plane_state);
if (err < 0)
return err;
@@ -689,8 +691,10 @@ static int tegra_plane_atomic_check(struct drm_plane *plane,
}
static void tegra_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct tegra_plane *p = to_tegra_plane(plane);
u32 value;
@@ -704,42 +708,44 @@ static void tegra_plane_atomic_disable(struct drm_plane *plane,
}
static void tegra_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
- struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
+ struct drm_framebuffer *fb = new_state->fb;
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_dc_window window;
unsigned int i;
/* rien ne va plus */
- if (!plane->state->crtc || !plane->state->fb)
+ if (!new_state->crtc || !new_state->fb)
return;
- if (!plane->state->visible)
- return tegra_plane_atomic_disable(plane, old_state);
+ if (!new_state->visible)
+ return tegra_plane_atomic_disable(plane, state);
memset(&window, 0, sizeof(window));
- window.src.x = plane->state->src.x1 >> 16;
- window.src.y = plane->state->src.y1 >> 16;
- window.src.w = drm_rect_width(&plane->state->src) >> 16;
- window.src.h = drm_rect_height(&plane->state->src) >> 16;
- window.dst.x = plane->state->dst.x1;
- window.dst.y = plane->state->dst.y1;
- window.dst.w = drm_rect_width(&plane->state->dst);
- window.dst.h = drm_rect_height(&plane->state->dst);
+ window.src.x = new_state->src.x1 >> 16;
+ window.src.y = new_state->src.y1 >> 16;
+ window.src.w = drm_rect_width(&new_state->src) >> 16;
+ window.src.h = drm_rect_height(&new_state->src) >> 16;
+ window.dst.x = new_state->dst.x1;
+ window.dst.y = new_state->dst.y1;
+ window.dst.w = drm_rect_width(&new_state->dst);
+ window.dst.h = drm_rect_height(&new_state->dst);
window.bits_per_pixel = fb->format->cpp[0] * 8;
- window.reflect_x = state->reflect_x;
- window.reflect_y = state->reflect_y;
+ window.reflect_x = tegra_plane_state->reflect_x;
+ window.reflect_y = tegra_plane_state->reflect_y;
/* copy from state */
- window.zpos = plane->state->normalized_zpos;
- window.tiling = state->tiling;
- window.format = state->format;
- window.swap = state->swap;
+ window.zpos = new_state->normalized_zpos;
+ window.tiling = tegra_plane_state->tiling;
+ window.format = tegra_plane_state->format;
+ window.swap = tegra_plane_state->swap;
for (i = 0; i < fb->format->num_planes; i++) {
- window.base[i] = state->iova[i] + fb->offsets[i];
+ window.base[i] = tegra_plane_state->iova[i] + fb->offsets[i];
/*
* Tegra uses a shared stride for UV planes. Framebuffers are
@@ -831,29 +837,31 @@ static const u32 tegra_cursor_plane_formats[] = {
};
static int tegra_cursor_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct tegra_plane *tegra = to_tegra_plane(plane);
int err;
/* no need for further checks if the plane is being disabled */
- if (!state->crtc)
+ if (!new_plane_state->crtc)
return 0;
/* scaling not supported for cursor */
- if ((state->src_w >> 16 != state->crtc_w) ||
- (state->src_h >> 16 != state->crtc_h))
+ if ((new_plane_state->src_w >> 16 != new_plane_state->crtc_w) ||
+ (new_plane_state->src_h >> 16 != new_plane_state->crtc_h))
return -EINVAL;
/* only square cursors supported */
- if (state->src_w != state->src_h)
+ if (new_plane_state->src_w != new_plane_state->src_h)
return -EINVAL;
- if (state->crtc_w != 32 && state->crtc_w != 64 &&
- state->crtc_w != 128 && state->crtc_w != 256)
+ if (new_plane_state->crtc_w != 32 && new_plane_state->crtc_w != 64 &&
+ new_plane_state->crtc_w != 128 && new_plane_state->crtc_w != 256)
return -EINVAL;
- err = tegra_plane_state_add(tegra, state);
+ err = tegra_plane_state_add(tegra, new_plane_state);
if (err < 0)
return err;
@@ -861,17 +869,19 @@ static int tegra_cursor_atomic_check(struct drm_plane *plane,
}
static void tegra_cursor_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
- struct tegra_dc *dc = to_tegra_dc(plane->state->crtc);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
+ struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
u32 value = CURSOR_CLIP_DISPLAY;
/* rien ne va plus */
- if (!plane->state->crtc || !plane->state->fb)
+ if (!new_state->crtc || !new_state->fb)
return;
- switch (plane->state->crtc_w) {
+ switch (new_state->crtc_w) {
case 32:
value |= CURSOR_SIZE_32x32;
break;
@@ -890,15 +900,15 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
default:
WARN(1, "cursor size %ux%u not supported\n",
- plane->state->crtc_w, plane->state->crtc_h);
+ new_state->crtc_w, new_state->crtc_h);
return;
}
- value |= (state->iova[0] >> 10) & 0x3fffff;
+ value |= (tegra_plane_state->iova[0] >> 10) & 0x3fffff;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- value = (state->iova[0] >> 32) & 0x3;
+ value = (tegra_plane_state->iova[0] >> 32) & 0x3;
tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
#endif
@@ -917,14 +927,16 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane,
tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
/* position the cursor */
- value = (plane->state->crtc_y & 0x3fff) << 16 |
- (plane->state->crtc_x & 0x3fff);
+ value = (new_state->crtc_y & 0x3fff) << 16 |
+ (new_state->crtc_x & 0x3fff);
tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
}
static void tegra_cursor_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct tegra_dc *dc;
u32 value;
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e9ce7d6992d2..90709c38c993 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -65,11 +65,14 @@ static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
struct tegra_drm *tegra = drm->dev_private;
if (tegra->hub) {
+ bool fence_cookie = dma_fence_begin_signalling();
+
drm_atomic_helper_commit_modeset_disables(drm, old_state);
tegra_display_hub_atomic_commit(drm, old_state);
drm_atomic_helper_commit_planes(drm, old_state, 0);
drm_atomic_helper_commit_modeset_enables(drm, old_state);
drm_atomic_helper_commit_hw_done(old_state);
+ dma_fence_end_signalling(fence_cookie);
drm_atomic_helper_wait_for_vblanks(drm, old_state);
drm_atomic_helper_cleanup_planes(drm, old_state);
} else {
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 5ce771cba133..8e6d329d062b 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -336,25 +336,27 @@ static void tegra_dc_remove_shared_plane(struct tegra_dc *dc,
}
static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct tegra_plane_state *plane_state = to_tegra_plane_state(state);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct tegra_plane_state *plane_state = to_tegra_plane_state(new_plane_state);
struct tegra_shared_plane *tegra = to_tegra_shared_plane(plane);
struct tegra_bo_tiling *tiling = &plane_state->tiling;
- struct tegra_dc *dc = to_tegra_dc(state->crtc);
+ struct tegra_dc *dc = to_tegra_dc(new_plane_state->crtc);
int err;
/* no need for further checks if the plane is being disabled */
- if (!state->crtc || !state->fb)
+ if (!new_plane_state->crtc || !new_plane_state->fb)
return 0;
- err = tegra_plane_format(state->fb->format->format,
+ err = tegra_plane_format(new_plane_state->fb->format->format,
&plane_state->format,
&plane_state->swap);
if (err < 0)
return err;
- err = tegra_fb_get_tiling(state->fb, tiling);
+ err = tegra_fb_get_tiling(new_plane_state->fb, tiling);
if (err < 0)
return err;
@@ -369,8 +371,8 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
* error out if the user tries to display a framebuffer with such a
* configuration.
*/
- if (state->fb->format->num_planes > 2) {
- if (state->fb->pitches[2] != state->fb->pitches[1]) {
+ if (new_plane_state->fb->format->num_planes > 2) {
+ if (new_plane_state->fb->pitches[2] != new_plane_state->fb->pitches[1]) {
DRM_ERROR("unsupported UV-plane configuration\n");
return -EINVAL;
}
@@ -378,7 +380,7 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
/* XXX scaling is not yet supported, add a check here */
- err = tegra_plane_state_add(&tegra->base, state);
+ err = tegra_plane_state_add(&tegra->base, new_plane_state);
if (err < 0)
return err;
@@ -386,8 +388,10 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
}
static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct tegra_plane *p = to_tegra_plane(plane);
struct tegra_dc *dc;
u32 value;
@@ -423,23 +427,25 @@ static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
}
static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct tegra_plane_state *state = to_tegra_plane_state(plane->state);
- struct tegra_dc *dc = to_tegra_dc(plane->state->crtc);
- unsigned int zpos = plane->state->normalized_zpos;
- struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct tegra_plane_state *tegra_plane_state = to_tegra_plane_state(new_state);
+ struct tegra_dc *dc = to_tegra_dc(new_state->crtc);
+ unsigned int zpos = new_state->normalized_zpos;
+ struct drm_framebuffer *fb = new_state->fb;
struct tegra_plane *p = to_tegra_plane(plane);
dma_addr_t base;
u32 value;
int err;
/* rien ne va plus */
- if (!plane->state->crtc || !plane->state->fb)
+ if (!new_state->crtc || !new_state->fb)
return;
- if (!plane->state->visible) {
- tegra_shared_plane_atomic_disable(plane, old_state);
+ if (!new_state->visible) {
+ tegra_shared_plane_atomic_disable(plane, state);
return;
}
@@ -477,22 +483,22 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
/* disable compression */
tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL);
- base = state->iova[0] + fb->offsets[0];
+ base = tegra_plane_state->iova[0] + fb->offsets[0];
- tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH);
+ tegra_plane_writel(p, tegra_plane_state->format, DC_WIN_COLOR_DEPTH);
tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS);
- value = V_POSITION(plane->state->crtc_y) |
- H_POSITION(plane->state->crtc_x);
+ value = V_POSITION(new_state->crtc_y) |
+ H_POSITION(new_state->crtc_x);
tegra_plane_writel(p, value, DC_WIN_POSITION);
- value = V_SIZE(plane->state->crtc_h) | H_SIZE(plane->state->crtc_w);
+ value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
tegra_plane_writel(p, value, DC_WIN_SIZE);
value = WIN_ENABLE | COLOR_EXPAND;
tegra_plane_writel(p, value, DC_WIN_WIN_OPTIONS);
- value = V_SIZE(plane->state->crtc_h) | H_SIZE(plane->state->crtc_w);
+ value = V_SIZE(new_state->crtc_h) | H_SIZE(new_state->crtc_w);
tegra_plane_writel(p, value, DC_WIN_CROPPED_SIZE);
tegra_plane_writel(p, upper_32_bits(base), DC_WINBUF_START_ADDR_HI);
@@ -504,15 +510,15 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane,
value = CLAMP_BEFORE_BLEND | DEGAMMA_SRGB | INPUT_RANGE_FULL;
tegra_plane_writel(p, value, DC_WIN_SET_PARAMS);
- value = OFFSET_X(plane->state->src_y >> 16) |
- OFFSET_Y(plane->state->src_x >> 16);
+ value = OFFSET_X(new_state->src_y >> 16) |
+ OFFSET_Y(new_state->src_x >> 16);
tegra_plane_writel(p, value, DC_WINBUF_CROPPED_POINT);
if (dc->soc->supports_block_linear) {
- unsigned long height = state->tiling.value;
+ unsigned long height = tegra_plane_state->tiling.value;
/* XXX */
- switch (state->tiling.mode) {
+ switch (tegra_plane_state->tiling.mode) {
case TEGRA_BO_TILING_MODE_PITCH:
value = DC_WINBUF_SURFACE_KIND_BLOCK_HEIGHT(0) |
DC_WINBUF_SURFACE_KIND_PITCH;
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 539d14935728..19e8847a164b 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -8,7 +8,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "dc.h"
@@ -198,7 +198,7 @@ int tegra_plane_prepare_fb(struct drm_plane *plane,
if (!state->fb)
return 0;
- drm_gem_fb_prepare_fb(plane, state);
+ drm_gem_plane_helper_prepare_fb(plane, state);
return tegra_dc_pin(dc, to_tegra_plane_state(state));
}
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index 09485c7f0d6f..95f8e0f78e32 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -4,6 +4,8 @@
* Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
*/
+#include <linux/dma-fence.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -26,6 +28,7 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *ddev = old_state->dev;
struct tidss_device *tidss = to_tidss(ddev);
+ bool fence_cookie = dma_fence_begin_signalling();
dev_dbg(ddev->dev, "%s\n", __func__);
@@ -36,6 +39,7 @@ static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_modeset_enables(ddev, old_state);
drm_atomic_helper_commit_hw_done(old_state);
+ dma_fence_end_signalling(fence_cookie);
drm_atomic_helper_wait_for_flip_done(ddev, old_state);
drm_atomic_helper_cleanup_planes(ddev, old_state);
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 35067ae674ea..1acd15aa4193 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -10,7 +10,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include "tidss_crtc.h"
#include "tidss_dispc.h"
@@ -20,8 +20,10 @@
/* drm_plane_helper_funcs */
static int tidss_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_device *ddev = plane->dev;
struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
@@ -33,20 +35,22 @@ static int tidss_plane_atomic_check(struct drm_plane *plane,
dev_dbg(ddev->dev, "%s\n", __func__);
- if (!state->crtc) {
+ if (!new_plane_state->crtc) {
/*
* The visible field is not reset by the DRM core but only
* updated by drm_plane_helper_check_state(), set it manually.
*/
- state->visible = false;
+ new_plane_state->visible = false;
return 0;
}
- crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ crtc_state = drm_atomic_get_crtc_state(state,
+ new_plane_state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- ret = drm_atomic_helper_check_plane_state(state, crtc_state, 0,
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+ 0,
INT_MAX, true, true);
if (ret < 0)
return ret;
@@ -63,35 +67,37 @@ static int tidss_plane_atomic_check(struct drm_plane *plane,
* check for odd height).
*/
- finfo = drm_format_info(state->fb->format->format);
+ finfo = drm_format_info(new_plane_state->fb->format->format);
- if ((state->src_x >> 16) % finfo->hsub != 0) {
+ if ((new_plane_state->src_x >> 16) % finfo->hsub != 0) {
dev_dbg(ddev->dev,
"%s: x-position %u not divisible subpixel size %u\n",
- __func__, (state->src_x >> 16), finfo->hsub);
+ __func__, (new_plane_state->src_x >> 16), finfo->hsub);
return -EINVAL;
}
- if ((state->src_y >> 16) % finfo->vsub != 0) {
+ if ((new_plane_state->src_y >> 16) % finfo->vsub != 0) {
dev_dbg(ddev->dev,
"%s: y-position %u not divisible subpixel size %u\n",
- __func__, (state->src_y >> 16), finfo->vsub);
+ __func__, (new_plane_state->src_y >> 16), finfo->vsub);
return -EINVAL;
}
- if ((state->src_w >> 16) % finfo->hsub != 0) {
+ if ((new_plane_state->src_w >> 16) % finfo->hsub != 0) {
dev_dbg(ddev->dev,
"%s: src width %u not divisible by subpixel size %u\n",
- __func__, (state->src_w >> 16), finfo->hsub);
+ __func__, (new_plane_state->src_w >> 16),
+ finfo->hsub);
return -EINVAL;
}
- if (!state->visible)
+ if (!new_plane_state->visible)
return 0;
- hw_videoport = to_tidss_crtc(state->crtc)->hw_videoport;
+ hw_videoport = to_tidss_crtc(new_plane_state->crtc)->hw_videoport;
- ret = dispc_plane_check(tidss->dispc, hw_plane, state, hw_videoport);
+ ret = dispc_plane_check(tidss->dispc, hw_plane, new_plane_state,
+ hw_videoport);
if (ret)
return ret;
@@ -99,26 +105,27 @@ static int tidss_plane_atomic_check(struct drm_plane *plane,
}
static void tidss_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *ddev = plane->dev;
struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
u32 hw_videoport;
int ret;
dev_dbg(ddev->dev, "%s\n", __func__);
- if (!state->visible) {
+ if (!new_state->visible) {
dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
return;
}
- hw_videoport = to_tidss_crtc(state->crtc)->hw_videoport;
+ hw_videoport = to_tidss_crtc(new_state->crtc)->hw_videoport;
ret = dispc_plane_setup(tidss->dispc, tplane->hw_plane_id,
- state, hw_videoport);
+ new_state, hw_videoport);
if (ret) {
dev_err(plane->dev->dev, "%s: Failed to setup plane %d\n",
@@ -131,7 +138,7 @@ static void tidss_plane_atomic_update(struct drm_plane *plane,
}
static void tidss_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *ddev = plane->dev;
struct tidss_device *tidss = to_tidss(ddev);
@@ -151,7 +158,7 @@ static void drm_plane_destroy(struct drm_plane *plane)
}
static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = {
- .prepare_fb = drm_gem_fb_prepare_fb,
+ .prepare_fb = drm_gem_plane_helper_prepare_fb,
.atomic_check = tidss_plane_atomic_check,
.atomic_update = tidss_plane_atomic_update,
.atomic_disable = tidss_plane_atomic_disable,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 30213708fc99..21e7b3d23c7d 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -393,7 +393,7 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc)
return;
}
}
- reg |= info->fdd < 12;
+ reg |= info->fdd << 12;
tilcdc_write(dev, LCDC_RASTER_CTRL_REG, reg);
if (info->invert_pxl_clk)
@@ -515,6 +515,15 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
drm_crtc_vblank_off(crtc);
+ spin_lock_irq(&crtc->dev->event_lock);
+
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+
+ spin_unlock_irq(&crtc->dev->event_lock);
+
tilcdc_crtc_disable_irqs(dev);
pm_runtime_put_sync(dev->dev);
@@ -904,13 +913,12 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
tilcdc_clear_irqstatus(dev, stat);
if (stat & LCDC_END_OF_FRAME0) {
- unsigned long flags;
bool skip_event = false;
ktime_t now;
now = ktime_get();
- spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+ spin_lock(&tilcdc_crtc->irq_lock);
tilcdc_crtc->last_vblank = now;
@@ -920,21 +928,21 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
skip_event = true;
}
- spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+ spin_unlock(&tilcdc_crtc->irq_lock);
drm_crtc_handle_vblank(crtc);
if (!skip_event) {
struct drm_pending_vblank_event *event;
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock(&dev->event_lock);
event = tilcdc_crtc->event;
tilcdc_crtc->event = NULL;
if (event)
drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock(&dev->event_lock);
}
if (tilcdc_crtc->frame_intact)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index 2f681a713815..74a5c8832229 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -21,48 +21,51 @@ static const struct drm_plane_funcs tilcdc_plane_funcs = {
};
static int tilcdc_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state;
- struct drm_plane_state *old_state = plane->state;
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
unsigned int pitch;
- if (!state->crtc)
+ if (!new_state->crtc)
return 0;
- if (WARN_ON(!state->fb))
+ if (WARN_ON(!new_state->fb))
return -EINVAL;
- if (state->crtc_x || state->crtc_y) {
+ if (new_state->crtc_x || new_state->crtc_y) {
dev_err(plane->dev->dev, "%s: crtc position must be zero.",
__func__);
return -EINVAL;
}
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ new_state->crtc);
/* we should have a crtc state if the plane is attached to a crtc */
if (WARN_ON(!crtc_state))
return 0;
- if (crtc_state->mode.hdisplay != state->crtc_w ||
- crtc_state->mode.vdisplay != state->crtc_h) {
+ if (crtc_state->mode.hdisplay != new_state->crtc_w ||
+ crtc_state->mode.vdisplay != new_state->crtc_h) {
dev_err(plane->dev->dev,
"%s: Size must match mode (%dx%d == %dx%d)", __func__,
crtc_state->mode.hdisplay, crtc_state->mode.vdisplay,
- state->crtc_w, state->crtc_h);
+ new_state->crtc_w, new_state->crtc_h);
return -EINVAL;
}
pitch = crtc_state->mode.hdisplay *
- state->fb->format->cpp[0];
- if (state->fb->pitches[0] != pitch) {
+ new_state->fb->format->cpp[0];
+ if (new_state->fb->pitches[0] != pitch) {
dev_err(plane->dev->dev,
"Invalid pitch: fb and crtc widths must be the same");
return -EINVAL;
}
- if (old_state->fb && state->fb->format != old_state->fb->format) {
+ if (old_state->fb && new_state->fb->format != old_state->fb->format) {
dev_dbg(plane->dev->dev,
"%s(): pixel format change requires mode_change\n",
__func__);
@@ -73,20 +76,21 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
}
static void tilcdc_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_plane_state *state = plane->state;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
- if (!state->crtc)
+ if (!new_state->crtc)
return;
- if (WARN_ON(!state->fb || !state->crtc->state))
+ if (WARN_ON(!new_state->fb || !new_state->crtc->state))
return;
- if (tilcdc_crtc_update_fb(state->crtc,
- state->fb,
- state->crtc->state->event) == 0) {
- state->crtc->state->event = NULL;
+ if (tilcdc_crtc_update_fb(new_state->crtc,
+ new_state->fb,
+ new_state->crtc->state->event) == 0) {
+ new_state->crtc->state->event = NULL;
}
}
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 2b6414f0fa75..9bbaa1a69050 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -1,5 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-only
+config DRM_ARCPGU
+ tristate "ARC PGU"
+ depends on DRM && OF
+ select DRM_KMS_CMA_HELPER
+ select DRM_KMS_HELPER
+ help
+ Choose this option if you have an ARC PGU controller.
+
+ If M is selected the module will be called arcpgu.
+
config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
depends on DRM && PCI && MMU
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 6ae4e9e5a35f..bef6780bdd6f 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c
new file mode 100644
index 000000000000..f8531c50a072
--- /dev/null
+++ b/drivers/gpu/drm/tiny/arcpgu.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ARC PGU DRM driver.
+ *
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#include <linux/clk.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+
+#define ARCPGU_REG_CTRL 0x00
+#define ARCPGU_REG_STAT 0x04
+#define ARCPGU_REG_FMT 0x10
+#define ARCPGU_REG_HSYNC 0x14
+#define ARCPGU_REG_VSYNC 0x18
+#define ARCPGU_REG_ACTIVE 0x1c
+#define ARCPGU_REG_BUF0_ADDR 0x40
+#define ARCPGU_REG_STRIDE 0x50
+#define ARCPGU_REG_START_SET 0x84
+
+#define ARCPGU_REG_ID 0x3FC
+
+#define ARCPGU_CTRL_ENABLE_MASK 0x02
+#define ARCPGU_CTRL_VS_POL_MASK 0x1
+#define ARCPGU_CTRL_VS_POL_OFST 0x3
+#define ARCPGU_CTRL_HS_POL_MASK 0x1
+#define ARCPGU_CTRL_HS_POL_OFST 0x4
+#define ARCPGU_MODE_XRGB8888 BIT(2)
+#define ARCPGU_STAT_BUSY_MASK 0x02
+
+struct arcpgu_drm_private {
+ struct drm_device drm;
+ void __iomem *regs;
+ struct clk *clk;
+ struct drm_simple_display_pipe pipe;
+ struct drm_connector sim_conn;
+};
+
+#define dev_to_arcpgu(x) container_of(x, struct arcpgu_drm_private, drm)
+
+#define pipe_to_arcpgu_priv(x) container_of(x, struct arcpgu_drm_private, pipe)
+
+static inline void arc_pgu_write(struct arcpgu_drm_private *arcpgu,
+ unsigned int reg, u32 value)
+{
+ iowrite32(value, arcpgu->regs + reg);
+}
+
+static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu,
+ unsigned int reg)
+{
+ return ioread32(arcpgu->regs + reg);
+}
+
+#define XRES_DEF 640
+#define YRES_DEF 480
+
+#define XRES_MAX 8192
+#define YRES_MAX 8192
+
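+/*
+ * The simulated connector has no EDID; expose the full range of non-EDID
+ * modes up to 8192x8192 and mark 640x480 as the preferred mode.
+ */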
+static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
+{
+ int count;
+
+ count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
+ drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+ return count;
+}
+
+static const struct drm_connector_helper_funcs
+arcpgu_drm_connector_helper_funcs = {
+ .get_modes = arcpgu_drm_connector_get_modes,
+};
+
+static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int arcpgu_drm_sim_init(struct drm_device *drm, struct drm_connector *connector)
+{
+ drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
+ return drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL);
+}
+
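+/*
+ * The timing registers store (value - 1), with the horizontal component in
+ * the upper 16 bits and the vertical component in the lower 16 bits.
+ */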
+#define ENCODE_PGU_XY(x, y) ((((x) - 1) << 16) | ((y) - 1))
+
+static const u32 arc_pgu_supported_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+};
+
+static void arc_pgu_set_pxl_fmt(struct arcpgu_drm_private *arcpgu)
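+/*
+ * Match the framebuffer format against the supported list and select either
+ * RGB565 or 32-bit XRGB mode in the control register.
+ */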
+{
+ const struct drm_framebuffer *fb = arcpgu->pipe.plane.state->fb;
+ uint32_t pixel_format = fb->format->format;
+ u32 format = DRM_FORMAT_INVALID;
+ int i;
+ u32 reg_ctrl;
+
+ for (i = 0; i < ARRAY_SIZE(arc_pgu_supported_formats); i++) {
+ if (arc_pgu_supported_formats[i] == pixel_format)
+ format = arc_pgu_supported_formats[i];
+ }
+
+ if (WARN_ON(format == DRM_FORMAT_INVALID))
+ return;
+
+ reg_ctrl = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL);
+ if (format == DRM_FORMAT_RGB565)
+ reg_ctrl &= ~ARCPGU_MODE_XRGB8888;
+ else
+ reg_ctrl |= ARCPGU_MODE_XRGB8888;
+ arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, reg_ctrl);
+}
+
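+/*
+ * Only accept modes whose pixel clock the clock source can generate within
+ * the allowed tolerance; everything else is rejected with MODE_NOCLOCK.
+ */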
+static enum drm_mode_status arc_pgu_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
+{
+ struct arcpgu_drm_private *arcpgu = pipe_to_arcpgu_priv(pipe);
+ long rate, clk_rate = mode->clock * 1000;
+ long diff = clk_rate / 200; /* +-0.5% allowed by HDMI spec */
+
+ rate = clk_round_rate(arcpgu->clk, clk_rate);
+ if ((max(rate, clk_rate) - min(rate, clk_rate) < diff) && (rate > 0))
+ return MODE_OK;
+
+ return MODE_NOCLOCK;
+}
+
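+/*
+ * Program the display timings, sync polarities, stride and pixel format,
+ * then request the pixel clock for the adjusted mode.
+ */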
+static void arc_pgu_mode_set(struct arcpgu_drm_private *arcpgu)
+{
+ struct drm_display_mode *m = &arcpgu->pipe.crtc.state->adjusted_mode;
+ u32 val;
+
+ arc_pgu_write(arcpgu, ARCPGU_REG_FMT,
+ ENCODE_PGU_XY(m->crtc_htotal, m->crtc_vtotal));
+
+ arc_pgu_write(arcpgu, ARCPGU_REG_HSYNC,
+ ENCODE_PGU_XY(m->crtc_hsync_start - m->crtc_hdisplay,
+ m->crtc_hsync_end - m->crtc_hdisplay));
+
+ arc_pgu_write(arcpgu, ARCPGU_REG_VSYNC,
+ ENCODE_PGU_XY(m->crtc_vsync_start - m->crtc_vdisplay,
+ m->crtc_vsync_end - m->crtc_vdisplay));
+
+ arc_pgu_write(arcpgu, ARCPGU_REG_ACTIVE,
+ ENCODE_PGU_XY(m->crtc_hblank_end - m->crtc_hblank_start,
+ m->crtc_vblank_end - m->crtc_vblank_start));
+
+ val = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL);
+
+ if (m->flags & DRM_MODE_FLAG_PVSYNC)
+ val |= ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST;
+ else
+ val &= ~(ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST);
+
+ if (m->flags & DRM_MODE_FLAG_PHSYNC)
+ val |= ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST;
+ else
+ val &= ~(ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST);
+
+ arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, val);
+ arc_pgu_write(arcpgu, ARCPGU_REG_STRIDE, 0);
+ arc_pgu_write(arcpgu, ARCPGU_REG_START_SET, 1);
+
+ arc_pgu_set_pxl_fmt(arcpgu);
+
+ clk_set_rate(arcpgu->clk, m->crtc_clock * 1000);
+}
+
+static void arc_pgu_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct arcpgu_drm_private *arcpgu = pipe_to_arcpgu_priv(pipe);
+
+ arc_pgu_mode_set(arcpgu);
+
+ clk_prepare_enable(arcpgu->clk);
+ arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
+ arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) |
+ ARCPGU_CTRL_ENABLE_MASK);
+}
+
+static void arc_pgu_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct arcpgu_drm_private *arcpgu = pipe_to_arcpgu_priv(pipe);
+
+ clk_disable_unprepare(arcpgu->clk);
+ arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
+ arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) &
+ ~ARCPGU_CTRL_ENABLE_MASK);
+}
+
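+/*
+ * Point the controller at the physical address of the new framebuffer's CMA
+ * backing object on each plane update.
+ */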
+static void arc_pgu_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *state)
+{
+ struct arcpgu_drm_private *arcpgu;
+ struct drm_gem_cma_object *gem;
+
+ if (!pipe->plane.state->fb)
+ return;
+
+ arcpgu = pipe_to_arcpgu_priv(pipe);
+ gem = drm_fb_cma_get_gem_obj(pipe->plane.state->fb, 0);
+ arc_pgu_write(arcpgu, ARCPGU_REG_BUF0_ADDR, gem->paddr);
+}
+
+static const struct drm_simple_display_pipe_funcs arc_pgu_pipe_funcs = {
+ .update = arc_pgu_update,
+ .mode_valid = arc_pgu_mode_valid,
+ .enable = arc_pgu_enable,
+ .disable = arc_pgu_disable,
+};
+
+static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(arcpgu_drm_ops);
+
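+/*
+ * Set up the pixel clock, mode configuration, registers and optional
+ * reserved framebuffer memory, then create the simple display pipe with
+ * either an HDMI bridge from the DT graph or the simulated connector.
+ */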
+static int arcpgu_load(struct arcpgu_drm_private *arcpgu)
+{
+ struct platform_device *pdev = to_platform_device(arcpgu->drm.dev);
+ struct device_node *encoder_node = NULL, *endpoint_node = NULL;
+ struct drm_connector *connector = NULL;
+ struct drm_device *drm = &arcpgu->drm;
+ struct resource *res;
+ int ret;
+
+ arcpgu->clk = devm_clk_get(drm->dev, "pxlclk");
+ if (IS_ERR(arcpgu->clk))
+ return PTR_ERR(arcpgu->clk);
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+ drm->mode_config.max_width = 1920;
+ drm->mode_config.max_height = 1080;
+ drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(arcpgu->regs))
+ return PTR_ERR(arcpgu->regs);
+
+ dev_info(drm->dev, "arc_pgu ID: 0x%x\n",
+ arc_pgu_read(arcpgu, ARCPGU_REG_ID));
+
+ /* Get the optional framebuffer memory resource */
+ ret = of_reserved_mem_device_init(drm->dev);
+ if (ret && ret != -ENODEV)
+ return ret;
+
+ if (dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)))
+ return -ENODEV;
+
+ /*
+ * There is only one output port inside each device. It is linked with the
+ * encoder endpoint.
+ */
+ endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
+ if (endpoint_node) {
+ encoder_node = of_graph_get_remote_port_parent(endpoint_node);
+ of_node_put(endpoint_node);
+ } else {
+ connector = &arcpgu->sim_conn;
+ dev_info(drm->dev, "no encoder found. Assumed virtual LCD on simulation platform\n");
+ ret = arcpgu_drm_sim_init(drm, connector);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = drm_simple_display_pipe_init(drm, &arcpgu->pipe, &arc_pgu_pipe_funcs,
+ arc_pgu_supported_formats,
+ ARRAY_SIZE(arc_pgu_supported_formats),
+ NULL, connector);
+ if (ret)
+ return ret;
+
+ if (encoder_node) {
+ struct drm_bridge *bridge;
+
+ /* Locate drm bridge from the hdmi encoder DT node */
+ bridge = of_drm_find_bridge(encoder_node);
+ if (!bridge)
+ return -EPROBE_DEFER;
+
+ ret = drm_simple_display_pipe_attach_bridge(&arcpgu->pipe, bridge);
+ if (ret)
+ return ret;
+ }
+
+ drm_mode_config_reset(drm);
+ drm_kms_helper_poll_init(drm);
+
+ platform_set_drvdata(pdev, drm);
+ return 0;
+}
+
+static int arcpgu_unload(struct drm_device *drm)
+{
+ drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int arcpgu_show_pxlclock(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_device *drm = node->minor->dev;
+ struct arcpgu_drm_private *arcpgu = dev_to_arcpgu(drm);
+ unsigned long clkrate = clk_get_rate(arcpgu->clk);
+ unsigned long mode_clock = arcpgu->pipe.crtc.mode.crtc_clock * 1000;
+
+ seq_printf(m, "hw : %lu\n", clkrate);
+ seq_printf(m, "mode: %lu\n", mode_clock);
+ return 0;
+}
+
+static struct drm_info_list arcpgu_debugfs_list[] = {
+ { "clocks", arcpgu_show_pxlclock, 0 },
+};
+
+static void arcpgu_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(arcpgu_debugfs_list,
+ ARRAY_SIZE(arcpgu_debugfs_list),
+ minor->debugfs_root, minor);
+}
+#endif
+
+static const struct drm_driver arcpgu_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ .name = "arcpgu",
+ .desc = "ARC PGU Controller",
+ .date = "20160219",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+ .fops = &arcpgu_drm_ops,
+ DRM_GEM_CMA_DRIVER_OPS,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = arcpgu_debugfs_init,
+#endif
+};
+
+static int arcpgu_probe(struct platform_device *pdev)
+{
+ struct arcpgu_drm_private *arcpgu;
+ int ret;
+
+ arcpgu = devm_drm_dev_alloc(&pdev->dev, &arcpgu_drm_driver,
+ struct arcpgu_drm_private, drm);
+ if (IS_ERR(arcpgu))
+ return PTR_ERR(arcpgu);
+
+ ret = arcpgu_load(arcpgu);
+ if (ret)
+ return ret;
+
+ ret = drm_dev_register(&arcpgu->drm, 0);
+ if (ret)
+ goto err_unload;
+
+ drm_fbdev_generic_setup(&arcpgu->drm, 16);
+
+ return 0;
+
+err_unload:
+ arcpgu_unload(&arcpgu->drm);
+
+ return ret;
+}
+
+static int arcpgu_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ drm_dev_unregister(drm);
+ arcpgu_unload(drm);
+
+ return 0;
+}
+
+static const struct of_device_id arcpgu_of_table[] = {
+ {.compatible = "snps,arcpgu"},
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, arcpgu_of_table);
+
+static struct platform_driver arcpgu_platform_driver = {
+ .probe = arcpgu_probe,
+ .remove = arcpgu_remove,
+ .driver = {
+ .name = "arcpgu",
+ .of_match_table = arcpgu_of_table,
+ },
+};
+
+module_platform_driver(arcpgu_platform_driver);
+
+MODULE_AUTHOR("Carlos Palminha <palminha@synopsys.com>");
+MODULE_DESCRIPTION("ARC PGU DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index a043e602199e..ad922c3ec681 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -33,8 +33,9 @@
#include <drm/drm_file.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
@@ -311,22 +312,15 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
return 0;
}
-static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
+static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, const struct dma_buf_map *map,
struct drm_rect *rect)
{
struct cirrus_device *cirrus = to_cirrus(fb->dev);
- struct dma_buf_map map;
- void *vmap;
- int idx, ret;
+ void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
+ int idx;
- ret = -ENODEV;
if (!drm_dev_enter(&cirrus->dev, &idx))
- goto out;
-
- ret = drm_gem_shmem_vmap(fb->obj[0], &map);
- if (ret)
- goto out_dev_exit;
- vmap = map.vaddr; /* TODO: Use mapping abstraction properly */
+ return -ENODEV;
if (cirrus->cpp == fb->format->cpp[0])
drm_fb_memcpy_dstclip(cirrus->vram,
@@ -345,16 +339,12 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
else
WARN_ON_ONCE("cpp mismatch");
- drm_gem_shmem_vunmap(fb->obj[0], &map);
- ret = 0;
-
-out_dev_exit:
drm_dev_exit(idx);
-out:
- return ret;
+
+ return 0;
}
-static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb)
+static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb, const struct dma_buf_map *map)
{
struct drm_rect fullscreen = {
.x1 = 0,
@@ -362,7 +352,7 @@ static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb)
.y1 = 0,
.y2 = fb->height,
};
- return cirrus_fb_blit_rect(fb, &fullscreen);
+ return cirrus_fb_blit_rect(fb, map, &fullscreen);
}
static int cirrus_check_size(int width, int height,
@@ -441,9 +431,10 @@ static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
cirrus_mode_set(cirrus, &crtc_state->mode, plane_state->fb);
- cirrus_fb_blit_fullscreen(plane_state->fb);
+ cirrus_fb_blit_fullscreen(plane_state->fb, &shadow_plane_state->map[0]);
}
static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -451,16 +442,15 @@ static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
{
struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev);
struct drm_plane_state *state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
- if (pipe->plane.state->fb &&
- cirrus->cpp != cirrus_cpp(pipe->plane.state->fb))
- cirrus_mode_set(cirrus, &crtc->mode,
- pipe->plane.state->fb);
+ if (state->fb && cirrus->cpp != cirrus_cpp(state->fb))
+ cirrus_mode_set(cirrus, &crtc->mode, state->fb);
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- cirrus_fb_blit_rect(pipe->plane.state->fb, &rect);
+ cirrus_fb_blit_rect(state->fb, &shadow_plane_state->map[0], &rect);
}
static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = {
@@ -468,6 +458,7 @@ static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = {
.check = cirrus_pipe_check,
.enable = cirrus_pipe_enable,
.update = cirrus_pipe_update,
+ DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};
static const uint32_t cirrus_formats[] = {
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 23866a54e3f9..a233c86d428b 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -16,8 +16,9 @@
#include <drm/drm_file.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
@@ -95,6 +96,7 @@ struct gm12u320_device {
struct drm_rect rect;
int frame;
int draw_status_timeout;
+ struct dma_buf_map src_map;
} fb_update;
};
@@ -251,7 +253,6 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320)
{
int block, dst_offset, len, remain, ret, x1, x2, y1, y2;
struct drm_framebuffer *fb;
- struct dma_buf_map map;
void *vaddr;
u8 *src;
@@ -265,20 +266,14 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320)
x2 = gm12u320->fb_update.rect.x2;
y1 = gm12u320->fb_update.rect.y1;
y2 = gm12u320->fb_update.rect.y2;
-
- ret = drm_gem_shmem_vmap(fb->obj[0], &map);
- if (ret) {
- GM12U320_ERR("failed to vmap fb: %d\n", ret);
- goto put_fb;
- }
- vaddr = map.vaddr; /* TODO: Use mapping abstraction properly */
+ vaddr = gm12u320->fb_update.src_map.vaddr; /* TODO: Use mapping abstraction properly */
if (fb->obj[0]->import_attach) {
ret = dma_buf_begin_cpu_access(
fb->obj[0]->import_attach->dmabuf, DMA_FROM_DEVICE);
if (ret) {
GM12U320_ERR("dma_buf_begin_cpu_access err: %d\n", ret);
- goto vunmap;
+ goto put_fb;
}
}
@@ -322,8 +317,6 @@ static void gm12u320_copy_fb_to_blocks(struct gm12u320_device *gm12u320)
if (ret)
GM12U320_ERR("dma_buf_end_cpu_access err: %d\n", ret);
}
-vunmap:
- drm_gem_shmem_vunmap(fb->obj[0], &map);
put_fb:
drm_framebuffer_put(fb);
gm12u320->fb_update.fb = NULL;
@@ -411,7 +404,7 @@ err:
GM12U320_ERR("Frame update error: %d\n", ret);
}
-static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
+static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb, const struct dma_buf_map *map,
struct drm_rect *dirty)
{
struct gm12u320_device *gm12u320 = to_gm12u320(fb->dev);
@@ -425,6 +418,7 @@ static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
drm_framebuffer_get(fb);
gm12u320->fb_update.fb = fb;
gm12u320->fb_update.rect = *dirty;
+ gm12u320->fb_update.src_map = *map;
wakeup = true;
} else {
struct drm_rect *rect = &gm12u320->fb_update.rect;
@@ -453,6 +447,7 @@ static void gm12u320_stop_fb_update(struct gm12u320_device *gm12u320)
mutex_lock(&gm12u320->fb_update.lock);
old_fb = gm12u320->fb_update.fb;
gm12u320->fb_update.fb = NULL;
+ dma_buf_map_clear(&gm12u320->fb_update.src_map);
mutex_unlock(&gm12u320->fb_update.lock);
drm_framebuffer_put(old_fb);
@@ -565,9 +560,10 @@ static void gm12u320_pipe_enable(struct drm_simple_display_pipe *pipe,
{
struct drm_rect rect = { 0, 0, GM12U320_USER_WIDTH, GM12U320_HEIGHT };
struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
gm12u320->fb_update.draw_status_timeout = FIRST_FRAME_TIMEOUT;
- gm12u320_fb_mark_dirty(plane_state->fb, &rect);
+ gm12u320_fb_mark_dirty(plane_state->fb, &shadow_plane_state->map[0], &rect);
}
static void gm12u320_pipe_disable(struct drm_simple_display_pipe *pipe)
@@ -581,16 +577,18 @@ static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
- gm12u320_fb_mark_dirty(pipe->plane.state->fb, &rect);
+ gm12u320_fb_mark_dirty(state->fb, &shadow_plane_state->map[0], &rect);
}
static const struct drm_simple_display_pipe_funcs gm12u320_pipe_funcs = {
.enable = gm12u320_pipe_enable,
.disable = gm12u320_pipe_disable,
.update = gm12u320_pipe_update,
+ DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};
static const uint32_t gm12u320_pipe_formats[] = {
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index c6525cd02bc2..3e2c2868a363 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -19,8 +19,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -184,7 +184,7 @@ static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
.enable = yx240qv29_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
};
static const struct drm_display_mode yx350hv15_mode = {
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 8e98962db5a2..6b87df19eec1 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -22,8 +22,8 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
@@ -328,7 +328,7 @@ static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
.enable = ili9225_pipe_enable,
.disable = ili9225_pipe_disable,
.update = ili9225_pipe_update,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
};
static const struct drm_display_mode ili9225_mode = {
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index 6ce97f0698eb..a97f3f70e4a6 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -18,8 +18,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -140,7 +140,7 @@ static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
.enable = yx240qv29_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
};
static const struct drm_display_mode yx240qv29_mode = {
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index d7ce40eb166a..6422a7f67079 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -17,8 +17,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -153,7 +153,7 @@ static const struct drm_simple_display_pipe_funcs waveshare_pipe_funcs = {
.enable = waveshare_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
};
static const struct drm_display_mode waveshare_mode = {
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index ff77f983f803..dc76fe53aa72 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -16,8 +16,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -144,7 +144,7 @@ static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
.enable = mi0283qt_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
};
static const struct drm_display_mode mi0283qt_mode = {
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 11c602fc9897..2cee07a2e00b 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -29,6 +29,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_format_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
@@ -860,7 +861,7 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
.enable = repaper_pipe_enable,
.disable = repaper_pipe_disable,
.update = repaper_pipe_update,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
};
static int repaper_connector_get_modes(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index ff5cf60f4bd7..7d216fe9267f 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -19,8 +19,8 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_format_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
@@ -268,7 +268,7 @@ static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
.enable = st7586_pipe_enable,
.disable = st7586_pipe_disable,
.update = st7586_pipe_update,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
};
static const struct drm_display_mode st7586_mode = {
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index faaba0a033ea..df8872d62cdd 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -19,8 +19,8 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
@@ -136,7 +136,7 @@ static const struct drm_simple_display_pipe_funcs st7735r_pipe_funcs = {
.enable = st7735r_pipe_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
};
static const struct st7735r_cfg jd_t18003_t01_cfg = {
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b6f5f87b270f..40e5e9da7953 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -2,10 +2,9 @@
#
# Makefile for the drm device driver. This driver provides support for the
-ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
- ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
- ttm_execbuf_util.o ttm_range_manager.o \
- ttm_resource.o ttm_pool.o
+ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
+ ttm_execbuf_util.o ttm_range_manager.o ttm_resource.o ttm_pool.o \
+ ttm_device.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 8f9fa4188897..0226ae69d3ab 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -49,7 +49,7 @@ struct ttm_agp_backend {
int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
- struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
+ struct page *dummy_read_page = ttm_glob.dummy_read_page;
struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
int ret, cached = ttm->caching == ttm_cached;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 101a68dc615b..3c23e863a3f0 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -44,21 +44,6 @@
#include "ttm_module.h"
-static void ttm_bo_global_kobj_release(struct kobject *kobj);
-
-/*
- * ttm_global_mutex - protecting the global BO state
- */
-DEFINE_MUTEX(ttm_global_mutex);
-unsigned ttm_bo_glob_use_count;
-struct ttm_bo_global ttm_bo_glob;
-EXPORT_SYMBOL(ttm_bo_glob);
-
-static struct attribute ttm_bo_count = {
- .name = "bo_count",
- .mode = S_IRUGO
-};
-
/* default destructor */
static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
{
@@ -84,41 +69,15 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
}
}
-static ssize_t ttm_bo_global_show(struct kobject *kobj,
- struct attribute *attr,
- char *buffer)
-{
- struct ttm_bo_global *glob =
- container_of(kobj, struct ttm_bo_global, kobj);
-
- return snprintf(buffer, PAGE_SIZE, "%d\n",
- atomic_read(&glob->bo_count));
-}
-
-static struct attribute *ttm_bo_global_attrs[] = {
- &ttm_bo_count,
- NULL
-};
-
-static const struct sysfs_ops ttm_bo_global_ops = {
- .show = &ttm_bo_global_show
-};
-
-static struct kobj_type ttm_bo_glob_kobj_type = {
- .release = &ttm_bo_global_kobj_release,
- .sysfs_ops = &ttm_bo_global_ops,
- .default_attrs = ttm_bo_global_attrs
-};
-
static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
list_del_init(&bo->swap);
list_del_init(&bo->lru);
- if (bdev->driver->del_from_lru_notify)
- bdev->driver->del_from_lru_notify(bo);
+ if (bdev->funcs->del_from_lru_notify)
+ bdev->funcs->del_from_lru_notify(bo);
}
static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
@@ -133,7 +92,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
struct ttm_resource *mem,
struct ttm_lru_bulk_move *bulk)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
struct ttm_resource_manager *man;
if (!bo->deleted)
@@ -151,12 +110,14 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
TTM_PAGE_FLAG_SWAPPED))) {
struct list_head *swap;
- swap = &ttm_bo_glob.swap_lru[bo->priority];
+ swap = &ttm_glob.swap_lru[bo->priority];
list_move_tail(&bo->swap, swap);
+ } else {
+ list_del_init(&bo->swap);
}
- if (bdev->driver->del_from_lru_notify)
- bdev->driver->del_from_lru_notify(bo);
+ if (bdev->funcs->del_from_lru_notify)
+ bdev->funcs->del_from_lru_notify(bo);
if (bulk && !bo->pin_count) {
switch (bo->mem.mem_type) {
@@ -219,7 +180,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
dma_resv_assert_held(pos->first->base.resv);
dma_resv_assert_held(pos->last->base.resv);
- lru = &ttm_bo_glob.swap_lru[i];
+ lru = &ttm_glob.swap_lru[i];
list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
}
}
@@ -230,7 +191,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
struct ttm_place *hop)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
int ret;
@@ -256,7 +217,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
}
- ret = bdev->driver->move(bo, evict, ctx, mem, hop);
+ ret = bdev->funcs->move(bo, evict, ctx, mem, hop);
if (ret) {
if (ret == -EMULTIHOP)
return ret;
@@ -284,8 +245,8 @@ out_err:
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
- if (bo->bdev->driver->delete_mem_notify)
- bo->bdev->driver->delete_mem_notify(bo);
+ if (bo->bdev->funcs->delete_mem_notify)
+ bo->bdev->funcs->delete_mem_notify(bo);
ttm_bo_tt_destroy(bo);
ttm_resource_free(bo, &bo->mem);
@@ -310,9 +271,9 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
* reference it any more. The only tricky case is the trylock on
* the resv object while holding the lru_lock.
*/
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
bo->base.resv = &bo->base._resv;
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
}
return r;
@@ -371,7 +332,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
30 * HZ);
@@ -381,7 +342,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
else if (lret == 0)
return -EBUSY;
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
/*
* We raced, and lost, someone else holds the reservation now,
@@ -391,7 +352,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
* delayed destruction would succeed, so just return success
* here.
*/
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
return 0;
}
ret = 0;
@@ -400,13 +361,13 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
if (ret || unlikely(list_empty(&bo->ddestroy))) {
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
return ret;
}
ttm_bo_del_from_lru(bo);
list_del_init(&bo->ddestroy);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);
if (unlock_resv)
@@ -421,9 +382,9 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
* Traverse the delayed list, and call ttm_bo_cleanup_refs on all
* encountered buffers.
*/
-static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
+bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
{
- struct ttm_bo_global *glob = &ttm_bo_glob;
+ struct ttm_global *glob = &ttm_glob;
struct list_head removed;
bool empty;
@@ -462,22 +423,11 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
return empty;
}
-static void ttm_bo_delayed_workqueue(struct work_struct *work)
-{
- struct ttm_bo_device *bdev =
- container_of(work, struct ttm_bo_device, wq.work);
-
- if (!ttm_bo_delayed_delete(bdev, false))
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
-}
-
static void ttm_bo_release(struct kref *kref)
{
struct ttm_buffer_object *bo =
container_of(kref, struct ttm_buffer_object, kref);
- struct ttm_bo_device *bdev = bo->bdev;
- size_t acc_size = bo->acc_size;
+ struct ttm_device *bdev = bo->bdev;
int ret;
if (!bo->deleted) {
@@ -490,8 +440,8 @@ static void ttm_bo_release(struct kref *kref)
30 * HZ);
}
- if (bo->bdev->driver->release_notify)
- bo->bdev->driver->release_notify(bo);
+ if (bo->bdev->funcs->release_notify)
+ bo->bdev->funcs->release_notify(bo);
drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
ttm_mem_io_free(bdev, &bo->mem);
@@ -503,7 +453,7 @@ static void ttm_bo_release(struct kref *kref)
ttm_bo_flush_all_fences(bo);
bo->deleted = true;
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
/*
* Make pinned bos immediately available to
@@ -520,27 +470,26 @@ static void ttm_bo_release(struct kref *kref)
kref_init(&bo->kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
return;
}
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
ttm_bo_del_from_lru(bo);
list_del(&bo->ddestroy);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);
dma_resv_unlock(bo->base.resv);
- atomic_dec(&ttm_bo_glob.bo_count);
+ atomic_dec(&ttm_glob.bo_count);
dma_fence_put(bo->moving);
if (!ttm_bo_uses_embedded_gem_object(bo))
dma_resv_fini(&bo->base._resv);
bo->destroy(bo);
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
}
void ttm_bo_put(struct ttm_buffer_object *bo)
@@ -549,13 +498,13 @@ void ttm_bo_put(struct ttm_buffer_object *bo)
}
EXPORT_SYMBOL(ttm_bo_put);
-int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
+int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev)
{
return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);
-void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
+void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched)
{
if (resched)
schedule_delayed_work(&bdev->wq,
@@ -566,7 +515,7 @@ EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
static int ttm_bo_evict(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
struct ttm_resource evict_mem;
struct ttm_placement placement;
struct ttm_place hop;
@@ -578,7 +527,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
placement.num_placement = 0;
placement.num_busy_placement = 0;
- bdev->driver->evict_flags(bo, &placement);
+ bdev->funcs->evict_flags(bo, &placement);
if (!placement.num_placement && !placement.num_busy_placement) {
ttm_bo_wait(bo, false, false);
@@ -694,7 +643,7 @@ static int ttm_mem_evict_wait_busy(struct ttm_buffer_object *busy_bo,
return r == -EDEADLK ? -EBUSY : r;
}
-int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+int ttm_mem_evict_first(struct ttm_device *bdev,
struct ttm_resource_manager *man,
const struct ttm_place *place,
struct ttm_operation_ctx *ctx,
@@ -705,7 +654,7 @@ int ttm_mem_evict_first(struct ttm_bo_device *bdev,
unsigned i;
int ret;
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
bool busy;
@@ -718,7 +667,7 @@ int ttm_mem_evict_first(struct ttm_bo_device *bdev,
continue;
}
- if (place && !bdev->driver->eviction_valuable(bo,
+ if (place && !bdev->funcs->eviction_valuable(bo,
place)) {
if (locked)
dma_resv_unlock(bo->base.resv);
@@ -742,7 +691,7 @@ int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (!bo) {
if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
busy_bo = NULL;
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
ttm_bo_put(busy_bo);
@@ -756,7 +705,7 @@ int ttm_mem_evict_first(struct ttm_bo_device *bdev,
return ret;
}
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
ret = ttm_bo_evict(bo, ctx);
if (locked)
@@ -811,7 +760,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
struct ttm_resource *mem,
struct ttm_operation_ctx *ctx)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
struct ww_acquire_ctx *ticket;
int ret;
@@ -846,7 +795,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
const struct ttm_place *place,
struct ttm_resource *mem)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
struct ttm_resource_manager *man;
man = ttm_manager_type(bdev, place->mem_type);
@@ -856,9 +805,9 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
mem->mem_type = place->mem_type;
mem->placement = place->flags;
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, mem, NULL);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
return 0;
}
@@ -876,7 +825,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_resource *mem,
struct ttm_operation_ctx *ctx)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
bool type_found = false;
int i, ret;
@@ -1097,32 +1046,20 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_validate);
-int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
+int ttm_bo_init_reserved(struct ttm_device *bdev,
struct ttm_buffer_object *bo,
size_t size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
struct ttm_operation_ctx *ctx,
- size_t acc_size,
struct sg_table *sg,
struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
- struct ttm_mem_global *mem_glob = &ttm_mem_glob;
bool locked;
int ret = 0;
- ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
- if (ret) {
- pr_err("Out of kernel memory\n");
- if (destroy)
- (*destroy)(bo);
- else
- kfree(bo);
- return -ENOMEM;
- }
-
bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
kref_init(&bo->kref);
@@ -1139,7 +1076,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->mem.bus.addr = NULL;
bo->moving = NULL;
bo->mem.placement = 0;
- bo->acc_size = acc_size;
bo->pin_count = 0;
bo->sg = sg;
if (resv) {
@@ -1157,7 +1093,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
dma_resv_init(&bo->base._resv);
drm_vma_node_reset(&bo->base.vma_node);
}
- atomic_inc(&ttm_bo_glob.bo_count);
+ atomic_inc(&ttm_glob.bo_count);
/*
* For ttm_bo_type_device buffers, allocate
@@ -1193,14 +1129,13 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
}
EXPORT_SYMBOL(ttm_bo_init_reserved);
-int ttm_bo_init(struct ttm_bo_device *bdev,
+int ttm_bo_init(struct ttm_device *bdev,
struct ttm_buffer_object *bo,
size_t size,
enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
bool interruptible,
- size_t acc_size,
struct sg_table *sg,
struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
@@ -1209,8 +1144,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
int ret;
ret = ttm_bo_init_reserved(bdev, bo, size, type, placement,
- page_alignment, &ctx, acc_size,
- sg, resv, destroy);
+ page_alignment, &ctx, sg, resv, destroy);
if (ret)
return ret;
@@ -1221,171 +1155,13 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
}
EXPORT_SYMBOL(ttm_bo_init);
-size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
- unsigned long bo_size,
- unsigned struct_size)
-{
- unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
- size_t size = 0;
-
- size += ttm_round_pot(struct_size);
- size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
- size += ttm_round_pot(sizeof(struct ttm_tt));
- return size;
-}
-EXPORT_SYMBOL(ttm_bo_dma_acc_size);
-
-static void ttm_bo_global_kobj_release(struct kobject *kobj)
-{
- struct ttm_bo_global *glob =
- container_of(kobj, struct ttm_bo_global, kobj);
-
- __free_page(glob->dummy_read_page);
-}
-
-static void ttm_bo_global_release(void)
-{
- struct ttm_bo_global *glob = &ttm_bo_glob;
-
- mutex_lock(&ttm_global_mutex);
- if (--ttm_bo_glob_use_count > 0)
- goto out;
-
- kobject_del(&glob->kobj);
- kobject_put(&glob->kobj);
- ttm_mem_global_release(&ttm_mem_glob);
- memset(glob, 0, sizeof(*glob));
-out:
- mutex_unlock(&ttm_global_mutex);
-}
-
-static int ttm_bo_global_init(void)
-{
- struct ttm_bo_global *glob = &ttm_bo_glob;
- int ret = 0;
- unsigned i;
-
- mutex_lock(&ttm_global_mutex);
- if (++ttm_bo_glob_use_count > 1)
- goto out;
-
- ret = ttm_mem_global_init(&ttm_mem_glob);
- if (ret)
- goto out;
-
- spin_lock_init(&glob->lru_lock);
- glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
-
- if (unlikely(glob->dummy_read_page == NULL)) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
- INIT_LIST_HEAD(&glob->swap_lru[i]);
- INIT_LIST_HEAD(&glob->device_list);
- atomic_set(&glob->bo_count, 0);
-
- ret = kobject_init_and_add(
- &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
- if (unlikely(ret != 0))
- kobject_put(&glob->kobj);
-out:
- mutex_unlock(&ttm_global_mutex);
- return ret;
-}
-
-int ttm_bo_device_release(struct ttm_bo_device *bdev)
-{
- struct ttm_bo_global *glob = &ttm_bo_glob;
- int ret = 0;
- unsigned i;
- struct ttm_resource_manager *man;
-
- man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
- ttm_resource_manager_set_used(man, false);
- ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
-
- mutex_lock(&ttm_global_mutex);
- list_del(&bdev->device_list);
- mutex_unlock(&ttm_global_mutex);
-
- cancel_delayed_work_sync(&bdev->wq);
-
- if (ttm_bo_delayed_delete(bdev, true))
- pr_debug("Delayed destroy list was clean\n");
-
- spin_lock(&glob->lru_lock);
- for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
- if (list_empty(&man->lru[0]))
- pr_debug("Swap list %d was clean\n", i);
- spin_unlock(&glob->lru_lock);
-
- ttm_pool_fini(&bdev->pool);
-
- if (!ret)
- ttm_bo_global_release();
-
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_device_release);
-
-static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)
-{
- struct ttm_resource_manager *man = &bdev->sysman;
-
- /*
- * Initialize the system memory buffer type.
- * Other types need to be driver / IOCTL initialized.
- */
- man->use_tt = true;
-
- ttm_resource_manager_init(man, 0);
- ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
- ttm_resource_manager_set_used(man, true);
-}
-
-int ttm_bo_device_init(struct ttm_bo_device *bdev,
- struct ttm_bo_driver *driver,
- struct device *dev,
- struct address_space *mapping,
- struct drm_vma_offset_manager *vma_manager,
- bool use_dma_alloc, bool use_dma32)
-{
- struct ttm_bo_global *glob = &ttm_bo_glob;
- int ret;
-
- if (WARN_ON(vma_manager == NULL))
- return -EINVAL;
-
- ret = ttm_bo_global_init();
- if (ret)
- return ret;
-
- bdev->driver = driver;
-
- ttm_bo_init_sysman(bdev);
- ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
-
- bdev->vma_manager = vma_manager;
- INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
- INIT_LIST_HEAD(&bdev->ddestroy);
- bdev->dev_mapping = mapping;
- mutex_lock(&ttm_global_mutex);
- list_add_tail(&bdev->device_list, &glob->device_list);
- mutex_unlock(&ttm_global_mutex);
-
- return 0;
-}
-EXPORT_SYMBOL(ttm_bo_device_init);
-
/*
* buffer object vm functions.
*/
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
ttm_mem_io_free(bdev, &bo->mem);
@@ -1421,9 +1197,9 @@ EXPORT_SYMBOL(ttm_bo_wait);
* A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list.
*/
-int ttm_bo_swapout(struct ttm_operation_ctx *ctx)
+int ttm_bo_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
{
- struct ttm_bo_global *glob = &ttm_bo_glob;
+ struct ttm_global *glob = &ttm_glob;
struct ttm_buffer_object *bo;
int ret = -EBUSY;
bool locked;
@@ -1501,10 +1277,10 @@ int ttm_bo_swapout(struct ttm_operation_ctx *ctx)
* anyone tries to access a ttm page.
*/
- if (bo->bdev->driver->swap_notify)
- bo->bdev->driver->swap_notify(bo);
+ if (bo->bdev->funcs->swap_notify)
+ bo->bdev->funcs->swap_notify(bo);
- ret = ttm_tt_swapout(bo->bdev, bo->ttm);
+ ret = ttm_tt_swapout(bo->bdev, bo->ttm, gfp_flags);
out:
/**
@@ -1527,4 +1303,3 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
ttm_tt_destroy(bo->bdev, bo->ttm);
bo->ttm = NULL;
}
-
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 398d5013fc39..031e5819fec4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -46,33 +46,33 @@ struct ttm_transfer_obj {
struct ttm_buffer_object *bo;
};
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+int ttm_mem_io_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
if (mem->bus.offset || mem->bus.addr)
return 0;
mem->bus.is_iomem = false;
- if (!bdev->driver->io_mem_reserve)
+ if (!bdev->funcs->io_mem_reserve)
return 0;
- return bdev->driver->io_mem_reserve(bdev, mem);
+ return bdev->funcs->io_mem_reserve(bdev, mem);
}
-void ttm_mem_io_free(struct ttm_bo_device *bdev,
+void ttm_mem_io_free(struct ttm_device *bdev,
struct ttm_resource *mem)
{
if (!mem->bus.offset && !mem->bus.addr)
return;
- if (bdev->driver->io_mem_free)
- bdev->driver->io_mem_free(bdev, mem);
+ if (bdev->funcs->io_mem_free)
+ bdev->funcs->io_mem_free(bdev, mem);
mem->bus.offset = 0;
mem->bus.addr = NULL;
}
-static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
+static int ttm_resource_ioremap(struct ttm_device *bdev,
struct ttm_resource *mem,
void **virtual)
{
@@ -102,7 +102,7 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
return 0;
}
-static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
+static void ttm_resource_iounmap(struct ttm_device *bdev,
struct ttm_resource *mem,
void *virtual)
{
@@ -172,7 +172,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
struct ttm_tt *ttm = bo->ttm;
struct ttm_resource *old_mem = &bo->mem;
@@ -300,7 +300,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
- atomic_inc(&ttm_bo_glob.bo_count);
+ atomic_inc(&ttm_glob.bo_count);
INIT_LIST_HEAD(&fbo->base.ddestroy);
INIT_LIST_HEAD(&fbo->base.lru);
INIT_LIST_HEAD(&fbo->base.swap);
@@ -309,7 +309,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
- fbo->base.acc_size = 0;
fbo->base.pin_count = 0;
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
@@ -602,7 +601,7 @@ static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
struct dma_fence *fence)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
/**
@@ -628,7 +627,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
bool pipeline,
struct ttm_resource *new_mem)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
int ret = 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 6dc96cf66744..b31b18058965 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -95,10 +95,10 @@ out_unlock:
static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
- if (bdev->driver->io_mem_pfn)
- return bdev->driver->io_mem_pfn(bo, page_offset);
+ if (bdev->funcs->io_mem_pfn)
+ return bdev->funcs->io_mem_pfn(bo, page_offset);
return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset;
}
@@ -216,7 +216,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
goto out_fallback;
}
- } else if (bo->bdev->driver->io_mem_pfn) {
+ } else if (bo->bdev->funcs->io_mem_pfn) {
for (i = 1; i < fault_page_size; ++i) {
if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i)
goto out_fallback;
@@ -278,7 +278,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
{
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = vma->vm_private_data;
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
unsigned long page_offset;
unsigned long page_last;
unsigned long pfn;
@@ -488,8 +488,8 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
break;
default:
- if (bo->bdev->driver->access_memory)
- ret = bo->bdev->driver->access_memory(
+ if (bo->bdev->funcs->access_memory)
+ ret = bo->bdev->funcs->access_memory(
bo, offset, buf, len, write);
else
ret = -EIO;
@@ -508,7 +508,7 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
.access = ttm_bo_vm_access,
};
-static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
+static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_device *bdev,
unsigned long offset,
unsigned long pages)
{
@@ -555,9 +555,8 @@ static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_s
}
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
- struct ttm_bo_device *bdev)
+ struct ttm_device *bdev)
{
- struct ttm_bo_driver *driver;
struct ttm_buffer_object *bo;
int ret;
@@ -568,12 +567,11 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
if (unlikely(!bo))
return -EINVAL;
- driver = bo->bdev->driver;
- if (unlikely(!driver->verify_access)) {
+ if (unlikely(!bo->bdev->funcs->verify_access)) {
ret = -EPERM;
goto out_unref;
}
- ret = driver->verify_access(bo, filp);
+ ret = bo->bdev->funcs->verify_access(bo, filp);
if (unlikely(ret != 0))
goto out_unref;
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
new file mode 100644
index 000000000000..95e1b7b1f2e6
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#define pr_fmt(fmt) "[TTM DEVICE] " fmt
+
+#include <linux/mm.h>
+
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_bo_api.h>
+
+#include "ttm_module.h"
+
+/**
+ * ttm_global_mutex - protecting the global state
+ */
+DEFINE_MUTEX(ttm_global_mutex);
+unsigned ttm_glob_use_count;
+struct ttm_global ttm_glob;
+EXPORT_SYMBOL(ttm_glob);
+
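+/*
+ * The global state is reference counted; the last ttm_device to go away
+ * tears down the pool and TT managers and frees the dummy read page.
+ */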
+static void ttm_global_release(void)
+{
+ struct ttm_global *glob = &ttm_glob;
+
+ mutex_lock(&ttm_global_mutex);
+ if (--ttm_glob_use_count > 0)
+ goto out;
+
+ ttm_pool_mgr_fini();
+ ttm_tt_mgr_fini();
+
+ __free_page(glob->dummy_read_page);
+ memset(glob, 0, sizeof(*glob));
+out:
+ mutex_unlock(&ttm_global_mutex);
+}
+
+static int ttm_global_init(void)
+{
+ struct ttm_global *glob = &ttm_glob;
+ unsigned long num_pages;
+ struct sysinfo si;
+ int ret = 0;
+ unsigned i;
+
+ mutex_lock(&ttm_global_mutex);
+ if (++ttm_glob_use_count > 1)
+ goto out;
+
+ si_meminfo(&si);
+
+ /* Limit the number of pages in the pool to about 50% of the total
+ * system memory.
+ */
+ num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;
+ ttm_pool_mgr_init(num_pages * 50 / 100);
+ ttm_tt_mgr_init();
+
+ spin_lock_init(&glob->lru_lock);
+ glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+
+ if (unlikely(glob->dummy_read_page == NULL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+ INIT_LIST_HEAD(&glob->swap_lru[i]);
+ INIT_LIST_HEAD(&glob->device_list);
+ atomic_set(&glob->bo_count, 0);
+
+ debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
+ &glob->bo_count);
+out:
+ mutex_unlock(&ttm_global_mutex);
+ return ret;
+}
+
+static void ttm_init_sysman(struct ttm_device *bdev)
+{
+ struct ttm_resource_manager *man = &bdev->sysman;
+
+ /*
+ * Initialize the system memory buffer type.
+ * Other types need to be driver / IOCTL initialized.
+ */
+ man->use_tt = true;
+
+ ttm_resource_manager_init(man, 0);
+ ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
+ ttm_resource_manager_set_used(man, true);
+}
+
+static void ttm_device_delayed_workqueue(struct work_struct *work)
+{
+ struct ttm_device *bdev =
+ container_of(work, struct ttm_device, wq.work);
+
+ if (!ttm_bo_delayed_delete(bdev, false))
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+}
+
+/**
+ * ttm_device_init
+ *
+ * @bdev: A pointer to a struct ttm_device to initialize.
+ * @funcs: Function table for the device.
+ * @dev: The core kernel device pointer for DMA mappings and allocations.
+ * @mapping: The address space to use for this bo.
+ * @vma_manager: A pointer to a vma manager.
+ * @use_dma_alloc: If coherent DMA allocation API should be used.
+ * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
+ *
+ * Initializes a struct ttm_device.
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
+ struct device *dev, struct address_space *mapping,
+ struct drm_vma_offset_manager *vma_manager,
+ bool use_dma_alloc, bool use_dma32)
+{
+ struct ttm_global *glob = &ttm_glob;
+ int ret;
+
+ if (WARN_ON(vma_manager == NULL))
+ return -EINVAL;
+
+ ret = ttm_global_init();
+ if (ret)
+ return ret;
+
+ bdev->funcs = funcs;
+
+ ttm_init_sysman(bdev);
+ ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
+
+ bdev->vma_manager = vma_manager;
+ INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
+ INIT_LIST_HEAD(&bdev->ddestroy);
+ bdev->dev_mapping = mapping;
+ mutex_lock(&ttm_global_mutex);
+ list_add_tail(&bdev->device_list, &glob->device_list);
+ mutex_unlock(&ttm_global_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(ttm_device_init);
+
+void ttm_device_fini(struct ttm_device *bdev)
+{
+ struct ttm_global *glob = &ttm_glob;
+ struct ttm_resource_manager *man;
+ unsigned i;
+
+ man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
+ ttm_resource_manager_set_used(man, false);
+ ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);
+
+ mutex_lock(&ttm_global_mutex);
+ list_del(&bdev->device_list);
+ mutex_unlock(&ttm_global_mutex);
+
+ cancel_delayed_work_sync(&bdev->wq);
+
+ if (ttm_bo_delayed_delete(bdev, true))
+ pr_debug("Delayed destroy list was clean\n");
+
+ spin_lock(&glob->lru_lock);
+ for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
+ if (list_empty(&man->lru[0]))
+ pr_debug("Swap list %d was clean\n", i);
+ spin_unlock(&glob->lru_lock);
+
+ ttm_pool_fini(&bdev->pool);
+ ttm_global_release();
+}
+EXPORT_SYMBOL(ttm_device_fini);
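
For orientation, drivers consume the new entry points by embedding a struct ttm_device and passing a struct ttm_device_funcs table, as the kerneldoc above describes. A minimal sketch of that call pattern follows; my_device, my_ttm_funcs and the my_ttm_tt_create/my_ttm_tt_destroy/my_verify_access hooks are illustrative names, not part of this series:

static struct ttm_device_funcs my_ttm_funcs = {
	.ttm_tt_create  = my_ttm_tt_create,	/* hypothetical driver hooks */
	.ttm_tt_destroy = my_ttm_tt_destroy,
	.verify_access  = my_verify_access,
};

static int my_device_ttm_init(struct my_device *mdev)
{
	struct drm_device *drm = &mdev->drm;	/* assumed embedded drm_device */

	/* use_dma_alloc enables the coherent per-device page pool,
	 * use_dma32 restricts pool pages to GFP_DMA32. */
	return ttm_device_init(&mdev->bdev, &my_ttm_funcs, drm->dev,
			       drm->anon_inode->i_mapping,
			       drm->vma_offset_manager,
			       true, false);
}

ttm_device_fini() is the matching teardown call once all buffer objects are gone.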
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 9fa36ed59429..690ab97d52b7 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -51,14 +51,14 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
if (list_empty(list))
return;
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
@@ -154,7 +154,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
if (list_empty(list))
return;
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
@@ -165,7 +165,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
}
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index c0906437cb1c..56b0efdba1a9 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -32,68 +32,22 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sched.h>
+#include <linux/debugfs.h>
#include <drm/drm_sysfs.h>
#include "ttm_module.h"
-static DECLARE_WAIT_QUEUE_HEAD(exit_q);
-static atomic_t device_released;
-
-static struct device_type ttm_drm_class_type = {
- .name = "ttm",
- /**
- * Add pm ops here.
- */
-};
-
-static void ttm_drm_class_device_release(struct device *dev)
-{
- atomic_set(&device_released, 1);
- wake_up_all(&exit_q);
-}
-
-static struct device ttm_drm_class_device = {
- .type = &ttm_drm_class_type,
- .release = &ttm_drm_class_device_release
-};
-
-struct kobject *ttm_get_kobj(void)
-{
- struct kobject *kobj = &ttm_drm_class_device.kobj;
- BUG_ON(kobj == NULL);
- return kobj;
-}
+struct dentry *ttm_debugfs_root;
static int __init ttm_init(void)
{
- int ret;
-
- ret = dev_set_name(&ttm_drm_class_device, "ttm");
- if (unlikely(ret != 0))
- return ret;
-
- atomic_set(&device_released, 0);
- ret = drm_class_device_register(&ttm_drm_class_device);
- if (unlikely(ret != 0))
- goto out_no_dev_reg;
-
+ ttm_debugfs_root = debugfs_create_dir("ttm", NULL);
return 0;
-out_no_dev_reg:
- atomic_set(&device_released, 1);
- wake_up_all(&exit_q);
- return ret;
}
static void __exit ttm_exit(void)
{
- drm_class_device_unregister(&ttm_drm_class_device);
-
- /**
- * Refuse to unload until the TTM device is released.
- * Not sure this is 100% needed.
- */
-
- wait_event(exit_q, atomic_read(&device_released) == 1);
+ debugfs_remove(ttm_debugfs_root);
}
module_init(ttm_init);
diff --git a/drivers/gpu/drm/ttm/ttm_module.h b/drivers/gpu/drm/ttm/ttm_module.h
index 45fa318c1585..d7cac5d4b835 100644
--- a/drivers/gpu/drm/ttm/ttm_module.h
+++ b/drivers/gpu/drm/ttm/ttm_module.h
@@ -31,10 +31,10 @@
#ifndef _TTM_MODULE_H_
#define _TTM_MODULE_H_
-#include <linux/kernel.h>
-struct kobject;
-
#define TTM_PFX "[TTM] "
-extern struct kobject *ttm_get_kobj(void);
+
+struct dentry;
+
+extern struct dentry *ttm_debugfs_root;
#endif /* _TTM_MODULE_H_ */
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 4eb6efb8b8c0..cb38b1a17b09 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
+#include <linux/sched/mm.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@@ -43,6 +44,8 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_tt.h>
+#include "ttm_module.h"
+
/**
* struct ttm_pool_dma - Helper object for coherent DMA mappings
*
@@ -412,16 +415,10 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
caching = pages + (1 << order);
}
- r = ttm_mem_global_alloc_page(&ttm_mem_glob, p,
- (1 << order) * PAGE_SIZE,
- ctx);
- if (r)
- goto error_free_page;
-
if (dma_addr) {
r = ttm_pool_map(pool, order, p, &dma_addr);
if (r)
- goto error_global_free;
+ goto error_free_page;
}
num_pages -= 1 << order;
@@ -435,9 +432,6 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
return 0;
-error_global_free:
- ttm_mem_global_free_page(&ttm_mem_glob, p, (1 << order) * PAGE_SIZE);
-
error_free_page:
ttm_pool_free_page(pool, tt->caching, order, p);
@@ -472,8 +466,6 @@ void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
order = ttm_pool_page_order(pool, p);
num_pages = 1ULL << order;
- ttm_mem_global_free_page(&ttm_mem_glob, p,
- num_pages * PAGE_SIZE);
if (tt->dma_address)
ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
@@ -513,10 +505,12 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
pool->use_dma_alloc = use_dma_alloc;
pool->use_dma32 = use_dma32;
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < MAX_ORDER; ++j)
- ttm_pool_type_init(&pool->caching[i].orders[j],
- pool, i, j);
+ if (use_dma_alloc) {
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
+ for (j = 0; j < MAX_ORDER; ++j)
+ ttm_pool_type_init(&pool->caching[i].orders[j],
+ pool, i, j);
+ }
}
/**
@@ -531,9 +525,33 @@ void ttm_pool_fini(struct ttm_pool *pool)
{
unsigned int i, j;
- for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
- for (j = 0; j < MAX_ORDER; ++j)
- ttm_pool_type_fini(&pool->caching[i].orders[j]);
+ if (pool->use_dma_alloc) {
+ for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
+ for (j = 0; j < MAX_ORDER; ++j)
+ ttm_pool_type_fini(&pool->caching[i].orders[j]);
+ }
+}
+
+/* As long as pages are available make sure to release at least one */
+static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ unsigned long num_freed = 0;
+
+ do
+ num_freed += ttm_pool_shrink();
+ while (!num_freed && atomic_long_read(&allocated_pages));
+
+ return num_freed;
+}
+
+/* Return the number of pages available or SHRINK_EMPTY if we have none */
+static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ unsigned long num_pages = atomic_long_read(&allocated_pages);
+
+ return num_pages ? num_pages : SHRINK_EMPTY;
}
#ifdef CONFIG_DEBUG_FS
@@ -552,6 +570,17 @@ static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
return count;
}
+/* Print a nice header for the order */
+static void ttm_pool_debugfs_header(struct seq_file *m)
+{
+ unsigned int i;
+
+ seq_puts(m, "\t ");
+ for (i = 0; i < MAX_ORDER; ++i)
+ seq_printf(m, " ---%2u---", i);
+ seq_puts(m, "\n");
+}
+
/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
struct seq_file *m)
@@ -563,6 +592,35 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
seq_puts(m, "\n");
}
+/* Dump the total amount of allocated pages */
+static void ttm_pool_debugfs_footer(struct seq_file *m)
+{
+ seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
+ atomic_long_read(&allocated_pages), page_pool_size);
+}
+
+/* Dump the information for the global pools */
+static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
+{
+ ttm_pool_debugfs_header(m);
+
+ mutex_lock(&shrinker_lock);
+ seq_puts(m, "wc\t:");
+ ttm_pool_debugfs_orders(global_write_combined, m);
+ seq_puts(m, "uc\t:");
+ ttm_pool_debugfs_orders(global_uncached, m);
+ seq_puts(m, "wc 32\t:");
+ ttm_pool_debugfs_orders(global_dma32_write_combined, m);
+ seq_puts(m, "uc 32\t:");
+ ttm_pool_debugfs_orders(global_dma32_uncached, m);
+ mutex_unlock(&shrinker_lock);
+
+ ttm_pool_debugfs_footer(m);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_globals);
+
/**
* ttm_pool_debugfs - Debugfs dump function for a pool
*
@@ -575,23 +633,14 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
unsigned int i;
- mutex_lock(&shrinker_lock);
-
- seq_puts(m, "\t ");
- for (i = 0; i < MAX_ORDER; ++i)
- seq_printf(m, " ---%2u---", i);
- seq_puts(m, "\n");
-
- seq_puts(m, "wc\t:");
- ttm_pool_debugfs_orders(global_write_combined, m);
- seq_puts(m, "uc\t:");
- ttm_pool_debugfs_orders(global_uncached, m);
+ if (!pool->use_dma_alloc) {
+ seq_puts(m, "unused\n");
+ return 0;
+ }
- seq_puts(m, "wc 32\t:");
- ttm_pool_debugfs_orders(global_dma32_write_combined, m);
- seq_puts(m, "uc 32\t:");
- ttm_pool_debugfs_orders(global_dma32_uncached, m);
+ ttm_pool_debugfs_header(m);
+ mutex_lock(&shrinker_lock);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
seq_puts(m, "DMA ");
switch (i) {
@@ -607,39 +656,28 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
}
ttm_pool_debugfs_orders(pool->caching[i].orders, m);
}
-
- seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
- atomic_long_read(&allocated_pages), page_pool_size);
-
mutex_unlock(&shrinker_lock);
+ ttm_pool_debugfs_footer(m);
return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);
-#endif
-
-/* As long as pages are available make sure to release at least one */
-static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
- struct shrink_control *sc)
+/* Test the shrinker functions and dump the result */
+static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
- unsigned long num_freed = 0;
+ struct shrink_control sc = { .gfp_mask = GFP_NOFS };
- do
- num_freed += ttm_pool_shrink();
- while (!num_freed && atomic_long_read(&allocated_pages));
+ fs_reclaim_acquire(GFP_KERNEL);
+ seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(&mm_shrinker, &sc),
+ ttm_pool_shrinker_scan(&mm_shrinker, &sc));
+ fs_reclaim_release(GFP_KERNEL);
- return num_freed;
+ return 0;
}
+DEFINE_SHOW_ATTRIBUTE(ttm_pool_debugfs_shrink);
-/* Return the number of pages available or SHRINK_EMPTY if we have none */
-static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- unsigned long num_pages = atomic_long_read(&allocated_pages);
-
- return num_pages ? num_pages : SHRINK_EMPTY;
-}
+#endif
/**
* ttm_pool_mgr_init - Initialize globals
@@ -669,6 +707,13 @@ int ttm_pool_mgr_init(unsigned long num_pages)
ttm_uncached, i);
}
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_file("page_pool", 0444, ttm_debugfs_root, NULL,
+ &ttm_pool_debugfs_globals_fops);
+ debugfs_create_file("page_pool_shrink", 0400, ttm_debugfs_root, NULL,
+ &ttm_pool_debugfs_shrink_fops);
+#endif
+
mm_shrinker.count_objects = ttm_pool_shrinker_count;
mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
mm_shrinker.seeks = 1;
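
The exported ttm_pool_debugfs() entry point keeps its existing role: a driver wires its per-device pool into its own debugfs file, while the new global "page_pool" and "page_pool_shrink" files above cover the shared pools. A hedged sketch of such a driver hook (the foo_* names and the embedded bdev member are illustrative, not from this series):

static int foo_pool_debugfs_show(struct seq_file *m, void *data)
{
	struct foo_device *fdev = m->private;	/* set via debugfs_create_file() data */

	/* Dump only this device's DMA pools; global pools are reported
	 * by the new "page_pool" file registered in ttm_pool_mgr_init(). */
	return ttm_pool_debugfs(&fdev->bdev.pool, m);
}
DEFINE_SHOW_ATTRIBUTE(foo_pool_debugfs);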
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index a39305f742da..707e5c152896 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -111,7 +111,7 @@ static void ttm_range_man_free(struct ttm_resource_manager *man,
static const struct ttm_resource_manager_func ttm_range_manager_func;
-int ttm_range_man_init(struct ttm_bo_device *bdev,
+int ttm_range_man_init(struct ttm_device *bdev,
unsigned type, bool use_tt,
unsigned long p_size)
{
@@ -138,7 +138,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev,
}
EXPORT_SYMBOL(ttm_range_man_init);
-int ttm_range_man_fini(struct ttm_bo_device *bdev,
+int ttm_range_man_fini(struct ttm_device *bdev,
unsigned type)
{
struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index b60699bf4816..ed1672a9f332 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -83,7 +83,7 @@ EXPORT_SYMBOL(ttm_resource_manager_init);
* Evict all the objects out of a memory manager until it is empty.
* Part of memory manager cleanup sequence.
*/
-int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev,
+int ttm_resource_manager_evict_all(struct ttm_device *bdev,
struct ttm_resource_manager *man)
{
struct ttm_operation_ctx ctx = {
@@ -91,7 +91,7 @@ int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev,
.no_wait_gpu = false,
.force_alloc = true
};
- struct ttm_bo_global *glob = &ttm_bo_glob;
+ struct ttm_global *glob = &ttm_glob;
struct dma_fence *fence;
int ret;
unsigned i;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 7f75a13163f0..2f0833c98d2c 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -38,12 +38,17 @@
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
+#include "ttm_module.h"
+
+static struct shrinker mm_shrinker;
+static atomic_long_t swapable_pages;
+
/*
* Allocates a ttm structure for the given BO.
*/
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
uint32_t page_flags = 0;
dma_resv_assert_held(bo->base.resv);
@@ -66,7 +71,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
return -EINVAL;
}
- bo->ttm = bdev->driver->ttm_tt_create(bo, page_flags);
+ bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
if (unlikely(bo->ttm == NULL))
return -ENOMEM;
@@ -108,7 +113,7 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
return 0;
}
-void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm)
{
ttm_tt_unpopulate(bdev, ttm);
@@ -119,9 +124,9 @@ void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
}
EXPORT_SYMBOL(ttm_tt_destroy_common);
-void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
- bdev->driver->ttm_tt_destroy(bdev, ttm);
+ bdev->funcs->ttm_tt_destroy(bdev, ttm);
}
static void ttm_tt_init_fields(struct ttm_tt *ttm,
@@ -223,32 +228,41 @@ out_err:
return ret;
}
-int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+/**
+ * ttm_tt_swapout - swap out tt object
+ *
+ * @bdev: TTM device structure.
+ * @ttm: The struct ttm_tt.
+ * @gfp_flags: Flags to use for memory allocation.
+ *
+ * Swapout a TT object to a shmem_file, return number of pages swapped out or
+ * negative error code.
+ */
+int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
+ gfp_t gfp_flags)
{
+ loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
struct address_space *swap_space;
struct file *swap_storage;
struct page *from_page;
struct page *to_page;
- gfp_t gfp_mask;
int i, ret;
- swap_storage = shmem_file_setup("ttm swap",
- ttm->num_pages << PAGE_SHIFT,
- 0);
+ swap_storage = shmem_file_setup("ttm swap", size, 0);
if (IS_ERR(swap_storage)) {
pr_err("Failed allocating swap storage\n");
return PTR_ERR(swap_storage);
}
swap_space = swap_storage->f_mapping;
- gfp_mask = mapping_gfp_mask(swap_space);
+ gfp_flags &= mapping_gfp_mask(swap_space);
for (i = 0; i < ttm->num_pages; ++i) {
from_page = ttm->pages[i];
if (unlikely(from_page == NULL))
continue;
- to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
+ to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_flags);
if (IS_ERR(to_page)) {
ret = PTR_ERR(to_page);
goto out_err;
@@ -263,7 +277,7 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
ttm->swap_storage = swap_storage;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
- return 0;
+ return ttm->num_pages;
out_err:
fput(swap_storage);
@@ -271,7 +285,7 @@ out_err:
return ret;
}
-static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void ttm_tt_add_mapping(struct ttm_device *bdev, struct ttm_tt *ttm)
{
pgoff_t i;
@@ -280,9 +294,11 @@ static void ttm_tt_add_mapping(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; ++i)
ttm->pages[i]->mapping = bdev->dev_mapping;
+
+ atomic_long_add(ttm->num_pages, &swapable_pages);
}
-int ttm_tt_populate(struct ttm_bo_device *bdev,
+int ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
int ret;
@@ -293,8 +309,8 @@ int ttm_tt_populate(struct ttm_bo_device *bdev,
if (ttm_tt_is_populated(ttm))
return 0;
- if (bdev->driver->ttm_tt_populate)
- ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
+ if (bdev->funcs->ttm_tt_populate)
+ ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
else
ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
if (ret)
@@ -326,18 +342,91 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
(*page)->mapping = NULL;
(*page++)->index = 0;
}
+
+ atomic_long_sub(ttm->num_pages, &swapable_pages);
}
-void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+void ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
if (!ttm_tt_is_populated(ttm))
return;
ttm_tt_clear_mapping(ttm);
- if (bdev->driver->ttm_tt_unpopulate)
- bdev->driver->ttm_tt_unpopulate(bdev, ttm);
+ if (bdev->funcs->ttm_tt_unpopulate)
+ bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
else
ttm_pool_free(&bdev->pool, ttm);
ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
}
+
+/* As long as pages are available make sure to release at least one */
+static unsigned long ttm_tt_shrinker_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct ttm_operation_ctx ctx = {
+ .no_wait_gpu = false
+ };
+ int ret;
+
+ ret = ttm_bo_swapout(&ctx, GFP_NOFS);
+ return ret < 0 ? SHRINK_EMPTY : ret;
+}
+
+/* Return the number of pages available or SHRINK_EMPTY if we have none */
+static unsigned long ttm_tt_shrinker_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ unsigned long num_pages;
+
+ num_pages = atomic_long_read(&swapable_pages);
+ return num_pages ? num_pages : SHRINK_EMPTY;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+/* Test the shrinker functions and dump the result */
+static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
+{
+ struct shrink_control sc = { .gfp_mask = GFP_KERNEL };
+
+ fs_reclaim_acquire(GFP_KERNEL);
+ seq_printf(m, "%lu/%lu\n", ttm_tt_shrinker_count(&mm_shrinker, &sc),
+ ttm_tt_shrinker_scan(&mm_shrinker, &sc));
+ fs_reclaim_release(GFP_KERNEL);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);
+
+#endif
+

+/**
+ * ttm_tt_mgr_init - register with the MM shrinker
+ *
+ * Register with the MM shrinker for swapping out BOs.
+ */
+int ttm_tt_mgr_init(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
+ &ttm_tt_debugfs_shrink_fops);
+#endif
+
+ mm_shrinker.count_objects = ttm_tt_shrinker_count;
+ mm_shrinker.scan_objects = ttm_tt_shrinker_scan;
+ mm_shrinker.seeks = 1;
+ return register_shrinker(&mm_shrinker);
+}
+
+/**
+ * ttm_tt_mgr_fini - unregister our MM shrinker
+ *
+ * Unregisters the MM shrinker.
+ */
+void ttm_tt_mgr_fini(void)
+{
+ unregister_shrinker(&mm_shrinker);
+}
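
With the accounting removed from ttm_pool, a driver-specific populate hook now only has to fill the pages and can still fall back to the common pool, mirroring the ttm_tt_populate() path above. A short sketch under that assumption (the foo_ prefix is illustrative):

static int foo_ttm_tt_populate(struct ttm_device *bdev,
			       struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx)
{
	int ret;

	/* Same allocation path the core uses when no hook is provided. */
	ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		return ret;

	/* ... driver-specific post-processing, e.g. cache attribute setup ... */
	return 0;
}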
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index cb0e837d3dba..50e1fb71869f 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -17,8 +17,8 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_vblank.h>
@@ -316,7 +316,7 @@ static const struct drm_simple_display_pipe_funcs tve200_display_funcs = {
.enable = tve200_display_enable,
.disable = tve200_display_disable,
.update = tve200_display_update,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
.enable_vblank = tve200_display_enable_vblank,
.disable_vblank = tve200_display_disable_vblank,
};
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 9d34ec9d03f6..8d98bf69d075 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -15,6 +15,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
@@ -266,18 +267,17 @@ static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y,
return 0;
}
-static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
- int width, int height)
+static int udl_handle_damage(struct drm_framebuffer *fb, const struct dma_buf_map *map,
+ int x, int y, int width, int height)
{
struct drm_device *dev = fb->dev;
struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
+ void *vaddr = map->vaddr; /* TODO: Use mapping abstraction properly */
int i, ret, tmp_ret;
char *cmd;
struct urb *urb;
struct drm_rect clip;
int log_bpp;
- struct dma_buf_map map;
- void *vaddr;
ret = udl_log_cpp(fb->format->cpp[0]);
if (ret < 0)
@@ -297,17 +297,10 @@ static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
return ret;
}
- ret = drm_gem_shmem_vmap(fb->obj[0], &map);
- if (ret) {
- DRM_ERROR("failed to vmap fb\n");
- goto out_dma_buf_end_cpu_access;
- }
- vaddr = map.vaddr; /* TODO: Use mapping abstraction properly */
-
urb = udl_get_urb(dev);
if (!urb) {
ret = -ENOMEM;
- goto out_drm_gem_shmem_vunmap;
+ goto out_dma_buf_end_cpu_access;
}
cmd = urb->transfer_buffer;
@@ -320,7 +313,7 @@ static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
&cmd, byte_offset, dev_byte_offset,
byte_width);
if (ret)
- goto out_drm_gem_shmem_vunmap;
+ goto out_dma_buf_end_cpu_access;
}
if (cmd > (char *)urb->transfer_buffer) {
@@ -336,8 +329,6 @@ static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
ret = 0;
-out_drm_gem_shmem_vunmap:
- drm_gem_shmem_vunmap(fb->obj[0], &map);
out_dma_buf_end_cpu_access:
if (import_attach) {
tmp_ret = dma_buf_end_cpu_access(import_attach->dmabuf,
@@ -375,6 +366,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_framebuffer *fb = plane_state->fb;
struct udl_device *udl = to_udl(dev);
struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
char *buf;
char *wrptr;
int color_depth = UDL_COLOR_DEPTH_16BPP;
@@ -400,7 +392,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
udl->mode_buf_len = wrptr - buf;
- udl_handle_damage(fb, 0, 0, fb->width, fb->height);
+ udl_handle_damage(fb, &shadow_plane_state->map[0], 0, 0, fb->width, fb->height);
if (!crtc_state->mode_changed)
return;
@@ -435,6 +427,7 @@ udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_plane_state)
{
struct drm_plane_state *state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_rect rect;
@@ -442,17 +435,16 @@ udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
return;
if (drm_atomic_helper_damage_merged(old_plane_state, state, &rect))
- udl_handle_damage(fb, rect.x1, rect.y1, rect.x2 - rect.x1,
- rect.y2 - rect.y1);
+ udl_handle_damage(fb, &shadow_plane_state->map[0], rect.x1, rect.y1,
+ rect.x2 - rect.x1, rect.y2 - rect.y1);
}
-static const
-struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = {
+static const struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = {
.mode_valid = udl_simple_display_pipe_mode_valid,
.enable = udl_simple_display_pipe_enable,
.disable = udl_simple_display_pipe_disable,
.update = udl_simple_display_pipe_update,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};
/*
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 452682e2209f..8992480c88fa 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -259,7 +259,7 @@ v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
return NULL;
}
-static void
+static enum drm_gpu_sched_stat
v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
{
enum v3d_queue q;
@@ -285,6 +285,8 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
}
mutex_unlock(&v3d->reset_lock);
+
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
/* If the current address or return address have changed, then the GPU
@@ -292,7 +294,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
* could fail if the GPU got in an infinite loop in the CL, but that
* is pretty unlikely outside of an i-g-t testcase.
*/
-static void
+static enum drm_gpu_sched_stat
v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
u32 *timedout_ctca, u32 *timedout_ctra)
{
@@ -304,39 +306,39 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
*timedout_ctca = ctca;
*timedout_ctra = ctra;
- return;
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
- v3d_gpu_reset_for_timeout(v3d, sched_job);
+ return v3d_gpu_reset_for_timeout(v3d, sched_job);
}
-static void
+static enum drm_gpu_sched_stat
v3d_bin_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_bin_job *job = to_bin_job(sched_job);
- v3d_cl_job_timedout(sched_job, V3D_BIN,
- &job->timedout_ctca, &job->timedout_ctra);
+ return v3d_cl_job_timedout(sched_job, V3D_BIN,
+ &job->timedout_ctca, &job->timedout_ctra);
}
-static void
+static enum drm_gpu_sched_stat
v3d_render_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_render_job *job = to_render_job(sched_job);
- v3d_cl_job_timedout(sched_job, V3D_RENDER,
- &job->timedout_ctca, &job->timedout_ctra);
+ return v3d_cl_job_timedout(sched_job, V3D_RENDER,
+ &job->timedout_ctca, &job->timedout_ctra);
}
-static void
+static enum drm_gpu_sched_stat
v3d_generic_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
- v3d_gpu_reset_for_timeout(job->v3d, sched_job);
+ return v3d_gpu_reset_for_timeout(job->v3d, sched_job);
}
-static void
+static enum drm_gpu_sched_stat
v3d_csd_job_timedout(struct drm_sched_job *sched_job)
{
struct v3d_csd_job *job = to_csd_job(sched_job);
@@ -348,10 +350,10 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
*/
if (job->timedout_batches != batches) {
job->timedout_batches = batches;
- return;
+ return DRM_GPU_SCHED_STAT_NOMINAL;
}
- v3d_gpu_reset_for_timeout(v3d, sched_job);
+ return v3d_gpu_reset_for_timeout(v3d, sched_job);
}
static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
@@ -401,7 +403,7 @@ v3d_sched_init(struct v3d_dev *v3d)
&v3d_bin_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms),
- "v3d_bin");
+ NULL, "v3d_bin");
if (ret) {
dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret);
return ret;
@@ -411,7 +413,7 @@ v3d_sched_init(struct v3d_dev *v3d)
&v3d_render_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms),
- "v3d_render");
+ NULL, "v3d_render");
if (ret) {
dev_err(v3d->drm.dev, "Failed to create render scheduler: %d.",
ret);
@@ -423,7 +425,7 @@ v3d_sched_init(struct v3d_dev *v3d)
&v3d_tfu_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms),
- "v3d_tfu");
+ NULL, "v3d_tfu");
if (ret) {
dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.",
ret);
@@ -436,7 +438,7 @@ v3d_sched_init(struct v3d_dev *v3d)
&v3d_csd_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms),
- "v3d_csd");
+ NULL, "v3d_csd");
if (ret) {
dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.",
ret);
@@ -448,7 +450,7 @@ v3d_sched_init(struct v3d_dev *v3d)
&v3d_cache_clean_sched_ops,
hw_jobs_limit, job_hang_limit,
msecs_to_jiffies(hang_limit_ms),
- "v3d_cache_clean");
+ NULL, "v3d_cache_clean");
if (ret) {
dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.",
ret);
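
The v3d changes track two scheduler API updates: timedout_job callbacks now report a status back to the scheduler, and drm_sched_init() gains an extra pointer argument (passed as NULL here) for an optionally shared scheduler score. A reduced sketch of the new timeout contract, assuming the DRM_GPU_SCHED_STAT_ENODEV status from the same scheduler rework; the foo_* names are illustrative:

static enum drm_gpu_sched_stat foo_job_timedout(struct drm_sched_job *sched_job)
{
	struct foo_job *job = to_foo_job(sched_job);	/* hypothetical */

	if (foo_reset_and_requeue(job->fdev))		/* hypothetical recovery */
		return DRM_GPU_SCHED_STAT_NOMINAL;

	/* Recovery failed; tell the scheduler the device is gone. */
	return DRM_GPU_SCHED_STAT_ENODEV;
}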
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index dbc0dd53c69e..964381d55fc1 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -17,6 +17,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
@@ -252,13 +253,15 @@ static const struct drm_crtc_funcs vbox_crtc_funcs = {
};
static int vbox_primary_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state = NULL;
if (new_state->crtc) {
- crtc_state = drm_atomic_get_existing_crtc_state(
- new_state->state, new_state->crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
}
@@ -270,22 +273,24 @@ static int vbox_primary_atomic_check(struct drm_plane *plane,
}
static void vbox_primary_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_crtc *crtc = plane->state->crtc;
- struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_crtc *crtc = new_state->crtc;
+ struct drm_framebuffer *fb = new_state->fb;
struct vbox_private *vbox = to_vbox_dev(fb->dev);
struct drm_mode_rect *clips;
uint32_t num_clips, i;
vbox_crtc_set_base_and_mode(crtc, fb,
- plane->state->src_x >> 16,
- plane->state->src_y >> 16);
+ new_state->src_x >> 16,
+ new_state->src_y >> 16);
/* Send information about dirty rectangles to VBVA. */
- clips = drm_plane_get_damage_clips(plane->state);
- num_clips = drm_plane_get_damage_clips_count(plane->state);
+ clips = drm_plane_get_damage_clips(new_state);
+ num_clips = drm_plane_get_damage_clips_count(new_state);
if (!num_clips)
return;
@@ -314,8 +319,10 @@ static void vbox_primary_atomic_update(struct drm_plane *plane,
}
static void vbox_primary_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct drm_crtc *crtc = old_state->crtc;
/* vbox_do_modeset checks plane->state->fb and will disable if NULL */
@@ -325,16 +332,18 @@ static void vbox_primary_atomic_disable(struct drm_plane *plane,
}
static int vbox_cursor_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state = NULL;
u32 width = new_state->crtc_w;
u32 height = new_state->crtc_h;
int ret;
if (new_state->crtc) {
- crtc_state = drm_atomic_get_existing_crtc_state(
- new_state->state, new_state->crtc);
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
+ new_state->crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
}
@@ -375,20 +384,24 @@ static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height,
}
static void vbox_cursor_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct vbox_private *vbox =
container_of(plane->dev, struct vbox_private, ddev);
- struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc);
- struct drm_framebuffer *fb = plane->state->fb;
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
- u32 width = plane->state->crtc_w;
- u32 height = plane->state->crtc_h;
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(new_state->crtc);
+ struct drm_framebuffer *fb = new_state->fb;
+ u32 width = new_state->crtc_w;
+ u32 height = new_state->crtc_h;
+ struct drm_shadow_plane_state *shadow_plane_state =
+ to_drm_shadow_plane_state(new_state);
+ struct dma_buf_map map = shadow_plane_state->map[0];
+ u8 *src = map.vaddr; /* TODO: Use mapping abstraction properly */
size_t data_size, mask_size;
u32 flags;
- struct dma_buf_map map;
- int ret;
- u8 *src;
/*
* VirtualBox uses the host windowing system to draw the cursor so
@@ -401,17 +414,6 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
vbox_crtc->cursor_enabled = true;
- ret = drm_gem_vram_vmap(gbo, &map);
- if (ret) {
- /*
- * BUG: we should have pinned the BO in prepare_fb().
- */
- mutex_unlock(&vbox->hw_mutex);
- DRM_WARN("Could not map cursor bo, skipping update\n");
- return;
- }
- src = map.vaddr; /* TODO: Use mapping abstraction properly */
-
/*
* The mask must be calculated based on the alpha
* channel, one bit per ARGB word, and must be 32-bit
@@ -421,7 +423,6 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
data_size = width * height * 4 + mask_size;
copy_cursor_image(src, vbox->cursor_data, width, height, mask_size);
- drm_gem_vram_vunmap(gbo, &map);
flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
VBOX_MOUSE_POINTER_ALPHA;
@@ -434,8 +435,10 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane,
}
static void vbox_cursor_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct vbox_private *vbox =
container_of(plane->dev, struct vbox_private, ddev);
struct vbox_crtc *vbox_crtc = to_vbox_crtc(old_state->crtc);
@@ -466,17 +469,14 @@ static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = {
.atomic_check = vbox_cursor_atomic_check,
.atomic_update = vbox_cursor_atomic_update,
.atomic_disable = vbox_cursor_atomic_disable,
- .prepare_fb = drm_gem_vram_plane_helper_prepare_fb,
- .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb,
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
};
static const struct drm_plane_funcs vbox_cursor_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_primary_helper_destroy,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
static const u32 vbox_primary_plane_formats[] = {
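
The vboxvideo hunks above follow the same mechanical conversion that repeats in the vc4, virtio-gpu and vkms diffs below: plane helper callbacks now receive the full struct drm_atomic_state and look up their own old/new plane state from it. A generic sketch of the pattern, with foo_* as illustrative names:

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);

	if (!new_state->crtc || !new_state->fb)
		return;

	/* Program the hardware from new_state; old_state stays available
	 * for drivers that need to compute deltas. */
	foo_hw_update(plane, old_state, new_state);	/* hypothetical */
}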
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index f09254c2497d..bb5529a7a9c2 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -363,9 +363,8 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
struct vc4_crtc_state *vc4_crtc_state =
to_vc4_crtc_state(old_crtc_state);
- struct drm_crtc_commit *commit;
unsigned int channel = vc4_crtc_state->assigned_channel;
- unsigned long done;
+ int ret;
if (channel == VC4_HVS_CHANNEL_DISABLED)
continue;
@@ -373,17 +372,9 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
if (!old_hvs_state->fifo_state[channel].in_use)
continue;
- commit = old_hvs_state->fifo_state[i].pending_commit;
- if (!commit)
- continue;
-
- done = wait_for_completion_timeout(&commit->hw_done, 10 * HZ);
- if (!done)
- drm_err(dev, "Timed out waiting for hw_done\n");
-
- done = wait_for_completion_timeout(&commit->flip_done, 10 * HZ);
- if (!done)
- drm_err(dev, "Timed out waiting for flip_done\n");
+ ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[i].pending_commit);
+ if (ret)
+ drm_err(dev, "Timed out waiting for commit\n");
}
drm_atomic_helper_commit_modeset_disables(dev, state);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 7322169c0682..c76e73a452e0 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -20,7 +20,7 @@
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "uapi/drm/vc4_drm.h"
@@ -1055,25 +1055,27 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
* in the CRTC's flush.
*/
static int vc4_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
- struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct vc4_plane_state *vc4_state = to_vc4_plane_state(new_plane_state);
int ret;
vc4_state->dlist_count = 0;
- if (!plane_enabled(state))
+ if (!plane_enabled(new_plane_state))
return 0;
- ret = vc4_plane_mode_set(plane, state);
+ ret = vc4_plane_mode_set(plane, new_plane_state);
if (ret)
return ret;
- return vc4_plane_allocate_lbm(state);
+ return vc4_plane_allocate_lbm(new_plane_state);
}
static void vc4_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
/* No contents here. Since we don't know where in the CRTC's
* dlist we should be stored, our dlist is uploaded to the
@@ -1133,32 +1135,34 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
}
static void vc4_plane_atomic_async_update(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct vc4_plane_state *vc4_state, *new_vc4_state;
- swap(plane->state->fb, state->fb);
- plane->state->crtc_x = state->crtc_x;
- plane->state->crtc_y = state->crtc_y;
- plane->state->crtc_w = state->crtc_w;
- plane->state->crtc_h = state->crtc_h;
- plane->state->src_x = state->src_x;
- plane->state->src_y = state->src_y;
- plane->state->src_w = state->src_w;
- plane->state->src_h = state->src_h;
- plane->state->src_h = state->src_h;
- plane->state->alpha = state->alpha;
- plane->state->pixel_blend_mode = state->pixel_blend_mode;
- plane->state->rotation = state->rotation;
- plane->state->zpos = state->zpos;
- plane->state->normalized_zpos = state->normalized_zpos;
- plane->state->color_encoding = state->color_encoding;
- plane->state->color_range = state->color_range;
- plane->state->src = state->src;
- plane->state->dst = state->dst;
- plane->state->visible = state->visible;
-
- new_vc4_state = to_vc4_plane_state(state);
+ swap(plane->state->fb, new_plane_state->fb);
+ plane->state->crtc_x = new_plane_state->crtc_x;
+ plane->state->crtc_y = new_plane_state->crtc_y;
+ plane->state->crtc_w = new_plane_state->crtc_w;
+ plane->state->crtc_h = new_plane_state->crtc_h;
+ plane->state->src_x = new_plane_state->src_x;
+ plane->state->src_y = new_plane_state->src_y;
+ plane->state->src_w = new_plane_state->src_w;
+ plane->state->src_h = new_plane_state->src_h;
+ plane->state->src_h = new_plane_state->src_h;
+ plane->state->alpha = new_plane_state->alpha;
+ plane->state->pixel_blend_mode = new_plane_state->pixel_blend_mode;
+ plane->state->rotation = new_plane_state->rotation;
+ plane->state->zpos = new_plane_state->zpos;
+ plane->state->normalized_zpos = new_plane_state->normalized_zpos;
+ plane->state->color_encoding = new_plane_state->color_encoding;
+ plane->state->color_range = new_plane_state->color_range;
+ plane->state->src = new_plane_state->src;
+ plane->state->dst = new_plane_state->dst;
+ plane->state->visible = new_plane_state->visible;
+
+ new_vc4_state = to_vc4_plane_state(new_plane_state);
vc4_state = to_vc4_plane_state(plane->state);
vc4_state->crtc_x = new_vc4_state->crtc_x;
@@ -1202,23 +1206,25 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane,
}
static int vc4_plane_atomic_async_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct vc4_plane_state *old_vc4_state, *new_vc4_state;
int ret;
u32 i;
- ret = vc4_plane_mode_set(plane, state);
+ ret = vc4_plane_mode_set(plane, new_plane_state);
if (ret)
return ret;
old_vc4_state = to_vc4_plane_state(plane->state);
- new_vc4_state = to_vc4_plane_state(state);
+ new_vc4_state = to_vc4_plane_state(new_plane_state);
if (old_vc4_state->dlist_count != new_vc4_state->dlist_count ||
old_vc4_state->pos0_offset != new_vc4_state->pos0_offset ||
old_vc4_state->pos2_offset != new_vc4_state->pos2_offset ||
old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset ||
- vc4_lbm_size(plane->state) != vc4_lbm_size(state))
+ vc4_lbm_size(plane->state) != vc4_lbm_size(new_plane_state))
return -EINVAL;
/* Only pos0, pos2 and ptr0 DWORDS can be updated in an async update
@@ -1250,7 +1256,7 @@ static int vc4_prepare_fb(struct drm_plane *plane,
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
- drm_gem_fb_prepare_fb(plane, state);
+ drm_gem_plane_helper_prepare_fb(plane, state);
if (plane->state->fb == state->fb)
return 0;
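
The prepare_fb change is part of the helper move into drm_gem_atomic_helper.h; drivers that need no extra work simply point their plane helpers at the renamed function. Sketch only, foo_* names are illustrative:

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	.prepare_fb	= drm_gem_plane_helper_prepare_fb, /* was drm_gem_fb_prepare_fb */
	.atomic_check	= foo_plane_atomic_check,	   /* hypothetical */
	.atomic_update	= foo_plane_atomic_update,
};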
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 42ac08ed1442..4e1b17548007 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -83,20 +83,23 @@ static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
};
static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR;
struct drm_crtc_state *crtc_state;
int ret;
- if (!state->fb || WARN_ON(!state->crtc))
+ if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
return 0;
- crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ crtc_state = drm_atomic_get_crtc_state(state,
+ new_plane_state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
is_cursor, true);
@@ -127,8 +130,10 @@ static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev,
}
static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = NULL;
@@ -239,8 +244,10 @@ static void virtio_gpu_cursor_cleanup_fb(struct drm_plane *plane,
}
static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct drm_device *dev = plane->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_output *output = NULL;
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 0443b7deeaef..6164349cdf11 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0+
+#include <linux/dma-fence.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
@@ -14,7 +16,9 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
struct drm_crtc *crtc = &output->crtc;
struct vkms_crtc_state *state;
u64 ret_overrun;
- bool ret;
+ bool ret, fence_cookie;
+
+ fence_cookie = dma_fence_begin_signalling();
ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
output->period_ns);
@@ -49,6 +53,8 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
DRM_DEBUG_DRIVER("Composer worker already queued\n");
}
+ dma_fence_end_signalling(fence_cookie);
+
return HRTIMER_RESTART;
}
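
The vkms hunk above also shows the dma-fence signalling annotations in action: code that ends up signalling fences (here the vblank handling in the hrtimer) is bracketed so lockdep can flag allocations or locks that could deadlock against fence waiters. A reduced sketch of the bracketing, with foo_output as an illustrative type:

static void foo_handle_vblank(struct foo_output *out)	/* hypothetical */
{
	bool cookie = dma_fence_begin_signalling();

	/* Code in here must not block on memory reclaim or take locks
	 * that are also held while waiting for fences to signal. */
	drm_crtc_handle_vblank(&out->crtc);

	dma_fence_end_signalling(cookie);
}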
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 0824327cc860..6d310d31b75d 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -5,6 +5,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_gem_shmem_helper.h>
@@ -92,20 +93,22 @@ static const struct drm_plane_funcs vkms_plane_funcs = {
};
static void vkms_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct vkms_plane_state *vkms_plane_state;
- struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_framebuffer *fb = new_state->fb;
struct vkms_composer *composer;
- if (!plane->state->crtc || !fb)
+ if (!new_state->crtc || !fb)
return;
- vkms_plane_state = to_vkms_plane_state(plane->state);
+ vkms_plane_state = to_vkms_plane_state(new_state);
composer = vkms_plane_state->composer;
- memcpy(&composer->src, &plane->state->src, sizeof(struct drm_rect));
- memcpy(&composer->dst, &plane->state->dst, sizeof(struct drm_rect));
+ memcpy(&composer->src, &new_state->src, sizeof(struct drm_rect));
+ memcpy(&composer->dst, &new_state->dst, sizeof(struct drm_rect));
memcpy(&composer->fb, fb, sizeof(struct drm_framebuffer));
drm_framebuffer_get(&composer->fb);
composer->offset = fb->offsets[0];
@@ -114,23 +117,26 @@ static void vkms_plane_atomic_update(struct drm_plane *plane,
}
static int vkms_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state;
bool can_position = false;
int ret;
- if (!state->fb || WARN_ON(!state->crtc))
+ if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
return 0;
- crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ crtc_state = drm_atomic_get_crtc_state(state,
+ new_plane_state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
if (plane->type == DRM_PLANE_TYPE_CURSOR)
can_position = true;
- ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
can_position, true);
@@ -138,7 +144,7 @@ static int vkms_plane_atomic_check(struct drm_plane *plane,
return ret;
/* for now primary plane must be visible and full screen */
- if (!state->visible && !can_position)
+ if (!new_plane_state->visible && !can_position)
return -EINVAL;
return 0;
@@ -159,7 +165,7 @@ static int vkms_prepare_fb(struct drm_plane *plane,
if (ret)
DRM_ERROR("vmap failed: %d\n", ret);
- return drm_gem_fb_prepare_fb(plane, state);
+ return drm_gem_plane_helper_prepare_fb(plane, state);
}
static void vkms_cleanup_fb(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index 78fdc1d59186..0935686475a0 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -42,11 +42,8 @@ static int vkms_wb_encoder_atomic_check(struct drm_encoder *encoder,
}
if (fb->format->format != vkms_wb_formats[0]) {
- struct drm_format_name_buf format_name;
-
- DRM_DEBUG_KMS("Invalid pixel format %s\n",
- drm_get_format_name(fb->format->format,
- &format_name));
+ DRM_DEBUG_KMS("Invalid pixel format %p4cc\n",
+ &fb->format->format);
return -EINVAL;
}
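
As the hunk above shows, the new %p4cc specifier takes a pointer to the 32-bit fourcc rather than the value itself, replacing the drm_get_format_name() buffer dance. A tiny illustrative example (the format chosen is arbitrary):

u32 format = DRM_FORMAT_XRGB8888;

DRM_DEBUG_KMS("unsupported pixel format %p4cc\n", &format);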
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index cc4cdca7176e..8c02fa5852e7 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -9,7 +9,7 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
- ttm_object.o ttm_lock.o
+ ttm_object.o ttm_lock.o ttm_memory.o
vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c
index a3bfbd9cea68..e972af07d029 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_memory.c
@@ -28,7 +28,6 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_memory.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
@@ -36,9 +35,11 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swap.h>
-#include <drm/ttm/ttm_pool.h>
-#include "ttm_module.h"
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+
+#include "ttm_memory.h"
#define TTM_MEMORY_ALLOC_RETRIES 4
@@ -276,9 +277,9 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
spin_unlock(&glob->lock);
- ret = ttm_bo_swapout(ctx);
+ ret = ttm_bo_swapout(ctx, GFP_KERNEL);
spin_lock(&glob->lock);
- if (unlikely(ret != 0))
+ if (unlikely(ret < 0))
break;
}
@@ -413,7 +414,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
}
#endif
-int ttm_mem_global_init(struct ttm_mem_global *glob)
+int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev)
{
struct sysinfo si;
int ret;
@@ -423,8 +424,9 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
spin_lock_init(&glob->lock);
glob->swap_queue = create_singlethread_workqueue("ttm_swap");
INIT_WORK(&glob->work, ttm_shrink_work);
- ret = kobject_init_and_add(
- &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
+
+ ret = kobject_init_and_add(&glob->kobj, &ttm_mem_glob_kobj_type,
+ &dev->kobj, "memory_accounting");
if (unlikely(ret != 0)) {
kobject_put(&glob->kobj);
return ret;
@@ -452,7 +454,6 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
zone->name, (unsigned long long)zone->max_mem >> 10);
}
- ttm_pool_mgr_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
@@ -464,9 +465,6 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
struct ttm_mem_zone *zone;
unsigned int i;
- /* let the page allocator first stop the shrink work. */
- ttm_pool_mgr_fini();
-
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
glob->swap_queue = NULL;
diff --git a/include/drm/ttm/ttm_memory.h b/drivers/gpu/drm/vmwgfx/ttm_memory.h
index c1f167881e33..c50dba774485 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_memory.h
@@ -35,7 +35,8 @@
#include <linux/errno.h>
#include <linux/kobject.h>
#include <linux/mm.h>
-#include "ttm_bo_api.h"
+
+#include <drm/ttm/ttm_bo_api.h>
/**
* struct ttm_mem_global - Global memory accounting structure.
@@ -79,7 +80,7 @@ extern struct ttm_mem_global {
#endif
} ttm_mem_glob;
-int ttm_mem_global_init(struct ttm_mem_global *glob);
+int ttm_mem_global_init(struct ttm_mem_global *glob, struct device *dev);
void ttm_mem_global_release(struct ttm_mem_global *glob);
int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
struct ttm_operation_ctx *ctx);
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 0fe869d0fad1..112394dd0ab6 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -42,6 +42,14 @@
*/
+#define pr_fmt(fmt) "[TTM] " fmt
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/atomic.h>
+#include "ttm_object.h"
+
/**
* struct ttm_object_file
*
@@ -55,16 +63,9 @@
*
* @ref_hash: Hash tables of ref objects, one per ttm_ref_type,
* for fast lookup of ref objects given a base object.
+ *
+ * @refcount: reference/usage count
*/
-
-#define pr_fmt(fmt) "[TTM] " fmt
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/atomic.h>
-#include "ttm_object.h"
-
struct ttm_object_file {
struct ttm_object_device *tdev;
spinlock_t lock;
@@ -73,7 +74,7 @@ struct ttm_object_file {
struct kref refcount;
};
-/**
+/*
* struct ttm_object_device
*
* @object_lock: lock that protects the object_hash hash table.
@@ -96,7 +97,7 @@ struct ttm_object_device {
struct idr idr;
};
-/**
+/*
* struct ttm_ref_object
*
* @hash: Hash entry for the per-file object reference hash.
@@ -568,7 +569,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
/**
* get_dma_buf_unless_doomed - get a dma_buf reference if possible.
*
- * @dma_buf: Non-refcounted pointer to a struct dma-buf.
+ * @dmabuf: Non-refcounted pointer to a struct dma-buf.
*
* Obtain a file reference from a lookup structure that doesn't refcount
* the file, but synchronizes with its release method to make sure it has
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.h b/drivers/gpu/drm/vmwgfx/ttm_object.h
index ede26df87c93..49b064f0cb19 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.h
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.h
@@ -43,7 +43,8 @@
#include <linux/rcupdate.h>
#include <drm/drm_hashtab.h>
-#include <drm/ttm/ttm_memory.h>
+
+#include "ttm_memory.h"
/**
* enum ttm_ref_type
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 180f6dbc9460..81f525a82b77 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -330,6 +330,8 @@ static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi)
*
* @cbs: Pointer to the context binding state tracker.
* @bi: Information about the binding to track.
+ * @shader_slot: The shader slot of the binding.
+ * @slot: The slot of the binding.
*
* Starts tracking the binding in the context binding
* state structure @cbs.
@@ -367,6 +369,7 @@ void vmw_binding_add_uav_index(struct vmw_ctx_binding_state *cbs, uint32 slot,
* vmw_binding_transfer: Transfer a context binding tracking entry.
*
* @cbs: Pointer to the persistent context binding state tracker.
+ * @from: Staged binding info built during execbuf
* @bi: Information about the binding to track.
*
*/
@@ -484,9 +487,8 @@ void vmw_binding_res_list_scrub(struct list_head *head)
/**
* vmw_binding_state_commit - Commit staged binding info
*
- * @ctx: Pointer to context to commit the staged binding info to.
+ * @to: Staged binding info area to copy into.
* @from: Staged binding info built during execbuf.
- * @scrubbed: Transfer only scrubbed bindings.
*
* Transfers binding info from a temporary structure
* (typically used by execbuf) to the persistent
@@ -511,7 +513,7 @@ void vmw_binding_state_commit(struct vmw_ctx_binding_state *to,
/**
* vmw_binding_rebind_all - Rebind all scrubbed bindings of a context
*
- * @ctx: The context resource
+ * @cbs: Pointer to the context binding state tracker.
*
* Walks through the context binding list and rebinds all scrubbed
* resources.
@@ -789,6 +791,7 @@ static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
* vmw_binding_emit_set_sr - Issue delayed DX shader resource binding commands
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
+ * @shader_slot: The shader slot of the binding.
*/
static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
int shader_slot)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 9f2779ddcf08..3a438ae4d3f4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -431,6 +431,7 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
* @src_stride: Source stride in bytes.
* @w: Width of blit.
* @h: Height of blit.
+ * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
* return: Zero on success. Negative error value on failure. Will print out
* kernel warnings on caller bugs.
*
@@ -465,13 +466,13 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
dma_resv_assert_held(src->base.resv);
if (!ttm_tt_is_populated(dst->ttm)) {
- ret = dst->bdev->driver->ttm_tt_populate(dst->bdev, dst->ttm, &ctx);
+ ret = dst->bdev->funcs->ttm_tt_populate(dst->bdev, dst->ttm, &ctx);
if (ret)
return ret;
}
if (!ttm_tt_is_populated(src->ttm)) {
- ret = src->bdev->driver->ttm_tt_populate(src->bdev, src->ttm, &ctx);
+ ret = src->bdev->funcs->ttm_tt_populate(src->bdev, src->ttm, &ctx);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 63dbc44eebe0..50e529a01677 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -131,7 +131,6 @@ err:
*
* @dev_priv: Driver private.
* @buf: DMA buffer to move.
- * @pin: Pin buffer if true.
* @interruptible: Use interruptible wait.
* Return: Zero on success, Negative error code on failure. In particular
* -ERESTARTSYS if interrupted by a signal
@@ -508,11 +507,16 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
acc_size = ttm_round_pot(sizeof(*bo));
acc_size += ttm_round_pot(npages * sizeof(void *));
acc_size += ttm_round_pot(sizeof(struct ttm_tt));
+
+ ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
+ if (unlikely(ret))
+ goto error_free;
+
ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
ttm_bo_type_device, placement, 0,
- &ctx, acc_size, NULL, NULL, NULL);
+ &ctx, NULL, NULL, NULL);
if (unlikely(ret))
- goto error_free;
+ goto error_account;
ttm_bo_pin(bo);
ttm_bo_unreserve(bo);
@@ -520,6 +524,9 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
return 0;
+error_account:
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
+
error_free:
kfree(bo);
return ret;
@@ -546,7 +553,7 @@ int vmw_bo_init(struct vmw_private *dev_priv,
void (*bo_free)(struct ttm_buffer_object *bo))
{
struct ttm_operation_ctx ctx = { interruptible, false };
- struct ttm_bo_device *bdev = &dev_priv->bdev;
+ struct ttm_device *bdev = &dev_priv->bdev;
size_t acc_size;
int ret;
bool user = (bo_free == &vmw_user_bo_destroy);
@@ -559,11 +566,17 @@ int vmw_bo_init(struct vmw_private *dev_priv,
vmw_bo->base.priority = 3;
vmw_bo->res_tree = RB_ROOT;
+ ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
+ if (unlikely(ret))
+ return ret;
+
ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
- 0, &ctx, acc_size, NULL, NULL, bo_free);
- if (unlikely(ret))
+ 0, &ctx, NULL, NULL, bo_free);
+ if (unlikely(ret)) {
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
return ret;
+ }
if (pin)
ttm_bo_pin(&vmw_bo->base);
@@ -635,6 +648,7 @@ static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
* @handle: Pointer to where the handle value should be assigned.
* @p_vbo: Pointer to where the refcounted struct vmw_buffer_object pointer
* should be assigned.
+ * @p_base: The TTM base object pointer about to be allocated.
* Return: Zero on success, negative error code on error.
*/
int vmw_user_bo_alloc(struct vmw_private *dev_priv,
@@ -1058,7 +1072,7 @@ int vmw_user_bo_reference(struct ttm_object_file *tfile,
void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence)
{
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
struct vmw_private *dev_priv =
container_of(bdev, struct vmw_private, bdev);
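
The two hunks above that wrap ttm_bo_init_reserved() reflect the same underlying change: TTM no longer takes an acc_size argument, so the driver charges and releases the global memory accounting itself. A minimal sketch of the resulting pattern, assuming the same ttm_mem_glob instance used elsewhere in this series (my_bo_create() is illustrative, not part of the patch):

static int my_bo_create(struct vmw_private *dev_priv, size_t size,
			struct ttm_placement *placement,
			struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	size_t acc_size = ttm_round_pot(sizeof(*bo));
	int ret;

	/* Charge the accounting up front; TTM no longer does this for us. */
	ret = ttm_mem_global_alloc(&ttm_mem_glob, acc_size, &ctx);
	if (ret)
		return ret;

	ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
				   ttm_bo_type_device, placement, 0,
				   &ctx, NULL, NULL, NULL);
	if (ret) {
		/* Init failed: give the accounted bytes back. */
		ttm_mem_global_free(&ttm_mem_glob, acc_size);
		return ret;
	}

	ttm_bo_unreserve(bo);
	return 0;
}
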
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index 7400d617ae3c..20246a7c97c9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -276,7 +276,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
return ret;
}
-/**
+/*
* Reserve @bytes number of bytes in the fifo.
*
* This function will return NULL (error) on two conditions:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 45fbc41440f1..2e23e537cdf5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -48,6 +48,7 @@
* @hw_submitted: List of command buffers submitted to hardware.
* @preempted: List of preempted command buffers.
* @num_hw_submitted: Number of buffers currently being processed by hardware
+ * @block_submission: Identifies a block command submission.
*/
struct vmw_cmdbuf_context {
struct list_head submitted;
@@ -58,7 +59,7 @@ struct vmw_cmdbuf_context {
};
/**
- * struct vmw_cmdbuf_man: - Command buffer manager
+ * struct vmw_cmdbuf_man - Command buffer manager
*
* @cur_mutex: Mutex protecting the command buffer used for incremental small
* kernel command submissions, @cur.
@@ -88,7 +89,7 @@ struct vmw_cmdbuf_context {
* @max_hw_submitted: Max number of in-flight command buffers the device can
* handle. Immutable.
* @lock: Spinlock protecting command submission queues.
- * @header: Pool of DMA memory for device command buffer headers.
+ * @headers: Pool of DMA memory for device command buffer headers.
* Internal protection.
* @dheaders: Pool of DMA memory for device command buffer headers with trailing
* space for inline data. Internal protection.
@@ -143,7 +144,7 @@ struct vmw_cmdbuf_man {
* @cb_context: The device command buffer context.
* @list: List head for attaching to the manager lists.
* @node: The range manager node.
- * @handle. The DMA address of @cb_header. Handed to the device on command
+ * @handle: The DMA address of @cb_header. Handed to the device on command
* buffer submission.
* @cmd: Pointer to the command buffer space of this buffer.
* @size: Size of the command buffer space of this buffer.
@@ -249,7 +250,7 @@ static void vmw_cmdbuf_header_inline_free(struct vmw_cmdbuf_header *header)
* __vmw_cmdbuf_header_free - Free a struct vmw_cmdbuf_header and its
* associated structures.
*
- * header: Pointer to the header to free.
+ * @header: Pointer to the header to free.
*
* For internal use. Must be called with man::lock held.
*/
@@ -365,10 +366,11 @@ static void vmw_cmdbuf_ctx_submit(struct vmw_cmdbuf_man *man,
}
/**
- * vmw_cmdbuf_ctx_submit: Process a command buffer context.
+ * vmw_cmdbuf_ctx_process - Process a command buffer context.
*
* @man: The command buffer manager.
* @ctx: The command buffer context.
+ * @notempty: Pass back count of non-empty command submitted lists.
*
* Submit command buffers to hardware if possible, and process finished
* buffers. Typically freeing them, but on preemption or error take
@@ -1161,6 +1163,7 @@ static int vmw_cmdbuf_send_device_command(struct vmw_cmdbuf_man *man,
* context.
*
* @man: The command buffer manager.
+ * @context: Device context to pass command through.
*
* Synchronously sends a preempt command.
*/
@@ -1184,6 +1187,7 @@ static int vmw_cmdbuf_preempt(struct vmw_cmdbuf_man *man, u32 context)
* context.
*
* @man: The command buffer manager.
+ * @context: Device context to start/stop.
* @enable: Whether to enable or disable the context.
*
* Synchronously sends a device start / stop context command.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 44d858ce4ce7..b262d61d839d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -69,7 +69,7 @@ struct vmw_cmdbuf_res_manager {
* vmw_cmdbuf_res_lookup - Look up a command buffer resource
*
* @man: Pointer to the command buffer resource manager
- * @resource_type: The resource type, that combined with the user key
+ * @res_type: The resource type, that combined with the user key
* identifies the resource.
* @user_key: The user key.
*
@@ -148,7 +148,6 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
/**
* vmw_cmdbuf_res_revert - Revert a list of command buffer resource actions
*
- * @man: Pointer to the command buffer resource manager
* @list: Caller's list of command buffer resource action
*
* This function reverts a list of command buffer resource
@@ -160,7 +159,6 @@ void vmw_cmdbuf_res_commit(struct list_head *list)
void vmw_cmdbuf_res_revert(struct list_head *list)
{
struct vmw_cmdbuf_res *entry, *next;
- int ret;
list_for_each_entry_safe(entry, next, list, head) {
switch (entry->state) {
@@ -168,8 +166,7 @@ void vmw_cmdbuf_res_revert(struct list_head *list)
vmw_cmdbuf_res_free(entry->man, entry);
break;
case VMW_CMDBUF_RES_DEL:
- ret = drm_ht_insert_item(&entry->man->resources,
- &entry->hash);
+ drm_ht_insert_item(&entry->man->resources, &entry->hash);
list_del(&entry->head);
list_add_tail(&entry->head, &entry->man->list);
entry->state = VMW_CMDBUF_RES_COMMITTED;
@@ -327,7 +324,6 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
}
/**
- *
* vmw_cmdbuf_res_man_size - Return the size of a command buffer managed
* resource manager
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 6f4d0da11ad8..4a5a3e246216 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -112,7 +112,7 @@ static const struct vmw_res_func vmw_dx_context_func = {
.unbind = vmw_dx_context_unbind
};
-/**
+/*
* Context management:
*/
@@ -672,7 +672,7 @@ static int vmw_dx_context_destroy(struct vmw_resource *res)
return 0;
}
-/**
+/*
* User-space context management:
*/
@@ -698,7 +698,7 @@ static void vmw_user_context_free(struct vmw_resource *res)
vmw_user_context_size);
}
-/**
+/*
* This function is called when user space has no more references on the
* base object. It releases the base-object's reference on the resource object.
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index ba658fa9cf6c..42321b9c8129 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -63,6 +63,7 @@ struct vmw_cotable {
* @min_initial_entries: Min number of initial entries at cotable allocation
* for this cotable type.
* @size: Size of each entry.
+ * @unbind_func: Unbind call-back function.
*/
struct vmw_cotable_info {
u32 min_initial_entries;
@@ -297,7 +298,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
*
* @res: Pointer to the cotable resource.
* @readback: Whether to read back cotable data to the backup buffer.
- * val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
+ * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller
* for convenience / fencing.
*
* Unbinds the cotable from the device and fences the backup buffer.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index dd69b51c40e4..6910111099c8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -47,12 +47,6 @@
#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600
-#ifndef VMWGFX_GIT_VERSION
-#define VMWGFX_GIT_VERSION "Unknown"
-#endif
-
-#define VMWGFX_REPO "In Tree"
-
#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
@@ -153,7 +147,7 @@
DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_MSG, \
struct drm_vmw_msg_arg)
-/**
+/*
* The core DRM version of this macro doesn't account for
* DRM_COMMAND_BASE.
*/
@@ -161,7 +155,7 @@
#define VMW_IOCTL_DEF(ioctl, func, flags) \
[DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_IOCTL_##ioctl, flags, func}
-/**
+/*
* Ioctl definitions.
*/
@@ -526,7 +520,7 @@ static void vmw_release_device_late(struct vmw_private *dev_priv)
vmw_fifo_release(dev_priv, &dev_priv->fifo);
}
-/**
+/*
* Sets the initial_[width|height] fields on the given vmw_private.
*
* It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
@@ -599,7 +593,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
/**
* vmw_dma_masks - set required page- and dma masks
*
- * @dev: Pointer to struct drm-device
+ * @dev_priv: Pointer to a struct vmw_private
*
* With 32-bit we can only handle 32 bit PFNs. Optionally set that
* restriction also for 64-bit systems.
@@ -885,12 +879,12 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
drm_vma_offset_manager_init(&dev_priv->vma_manager,
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
- ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver,
- dev_priv->drm.dev,
- dev_priv->drm.anon_inode->i_mapping,
- &dev_priv->vma_manager,
- dev_priv->map_mode == vmw_dma_alloc_coherent,
- false);
+ ret = ttm_device_init(&dev_priv->bdev, &vmw_bo_driver,
+ dev_priv->drm.dev,
+ dev_priv->drm.anon_inode->i_mapping,
+ &dev_priv->vma_manager,
+ dev_priv->map_mode == vmw_dma_alloc_coherent,
+ false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
goto out_no_bdev;
@@ -967,8 +961,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
if (ret)
goto out_no_fifo;
- DRM_INFO("Atomic: %s\n", (dev_priv->drm.driver->driver_features & DRIVER_ATOMIC)
- ? "yes." : "no.");
if (dev_priv->sm_type == VMW_SM_5)
DRM_INFO("SM5 support available.\n");
if (dev_priv->sm_type == VMW_SM_4_1)
@@ -976,11 +968,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
if (dev_priv->sm_type == VMW_SM_4)
DRM_INFO("SM4 support available.\n");
- snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
- VMWGFX_REPO, VMWGFX_GIT_VERSION);
- vmw_host_log(host_log);
-
- memset(host_log, 0, sizeof(host_log));
snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
VMWGFX_DRIVER_PATCHLEVEL);
@@ -1007,7 +994,7 @@ out_no_kms:
vmw_gmrid_man_fini(dev_priv, VMW_PL_GMR);
vmw_vram_manager_fini(dev_priv);
out_no_vram:
- (void)ttm_bo_device_release(&dev_priv->bdev);
+ ttm_device_fini(&dev_priv->bdev);
out_no_bdev:
vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
@@ -1054,7 +1041,7 @@ static void vmw_driver_unload(struct drm_device *dev)
if (dev_priv->has_mob)
vmw_gmrid_man_fini(dev_priv, VMW_PL_MOB);
vmw_vram_manager_fini(dev_priv);
- (void) ttm_bo_device_release(&dev_priv->bdev);
+ ttm_device_fini(&dev_priv->bdev);
drm_vma_offset_manager_destroy(&dev_priv->vma_manager);
vmw_release_device_late(dev_priv);
vmw_fence_manager_takedown(dev_priv->fman);
@@ -1268,6 +1255,7 @@ static void vmw_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ ttm_mem_global_release(&ttm_mem_glob);
drm_dev_unregister(dev);
vmw_driver_unload(dev);
}
@@ -1383,7 +1371,7 @@ static int vmw_pm_freeze(struct device *kdev)
vmw_execbuf_release_pinned_bo(dev_priv);
vmw_resource_evict_all(dev_priv);
vmw_release_device_early(dev_priv);
- while (ttm_bo_swapout(&ctx) == 0);
+ while (ttm_bo_swapout(&ctx, GFP_KERNEL) > 0);
if (dev_priv->enable_fb)
vmw_fifo_resource_dec(dev_priv);
if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
@@ -1516,9 +1504,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(vmw))
return PTR_ERR(vmw);
- vmw->drm.pdev = pdev;
pci_set_drvdata(pdev, &vmw->drm);
+ ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev);
+ if (ret)
+ return ret;
+
ret = vmw_driver_load(vmw, ent->device);
if (ret)
return ret;
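
Taken together, the vmwgfx_drv.c hunks move the driver from ttm_bo_device_init()/ttm_bo_device_release() to ttm_device_init()/ttm_device_fini(), and make the driver own the lifetime of the global memory accounting. A rough sketch of the probe/remove pairing this implies, with error unwinding elided (my_probe()/my_remove() are illustrative):

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;

	/* The accounting singleton is now initialized by the driver and
	 * takes the owning struct device. */
	ret = ttm_mem_global_init(&ttm_mem_glob, &pdev->dev);
	if (ret)
		return ret;

	/* ... ttm_device_init(&dev_priv->bdev, &vmw_bo_driver, ...),
	 * KMS setup, drm_dev_register(), etc. ... */
	return 0;
}

static void my_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* Mirror of the init done in probe. */
	ttm_mem_global_release(&ttm_mem_glob);
	drm_dev_unregister(dev);
	/* ... driver unload ... */
}
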
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 5fa5bcd20cc5..bb2ce6327944 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -55,10 +55,10 @@
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20200114"
+#define VMWGFX_DRIVER_DATE "20210218"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 18
-#define VMWGFX_DRIVER_PATCHLEVEL 0
+#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
@@ -484,7 +484,7 @@ enum vmw_sm_type {
struct vmw_private {
struct drm_device drm;
- struct ttm_bo_device bdev;
+ struct ttm_device bdev;
struct vmw_fifo_state fifo;
@@ -999,7 +999,7 @@ extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_nonfixed_placement;
-extern struct ttm_bo_driver vmw_bo_driver;
+extern struct ttm_device_funcs vmw_bo_driver;
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
extern int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
@@ -1525,6 +1525,8 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
*buf = NULL;
if (tmp_buf != NULL) {
+ if (tmp_buf->base.pin_count > 0)
+ ttm_bo_unpin(&tmp_buf->base);
ttm_bo_put(&tmp_buf->base);
}
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 462f17320708..7a24196f92c3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -80,7 +80,8 @@ struct vmw_relocation {
* with a NOP.
* @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
* validation is -1, the command is replaced with a NOP. Otherwise no action.
- */
+ * @vmw_res_rel_max: Last value in the enum - used for error checking
+ */
enum vmw_resource_relocation_type {
vmw_res_rel_normal,
vmw_res_rel_nop,
@@ -122,9 +123,11 @@ struct vmw_ctx_validation_info {
/**
* struct vmw_cmd_entry - Describe a command for the verifier
*
+ * @func: Call-back to handle the command.
* @user_allow: Whether allowed from the execbuf ioctl.
* @gb_disable: Whether disabled if guest-backed objects are available.
* @gb_enable: Whether enabled iff guest-backed objects are available.
+ * @cmd_name: Name of the command.
*/
struct vmw_cmd_entry {
int (*func) (struct vmw_private *, struct vmw_sw_context *,
@@ -203,6 +206,7 @@ static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
*
* @dev_priv: Pointer to the device private:
* @sw_context: The command submission context
+ * @res: Pointer to the resource
* @node: The validation node holding the context resource metadata
*/
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
@@ -509,7 +513,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
/**
* vmw_resource_relocation_add - Add a relocation to the relocation list
*
- * @list: Pointer to head of relocation list.
+ * @sw_context: Pointer to the software context.
* @res: The resource.
* @offset: Offset into the command buffer currently being parsed where the id
* that needs fixup is located. Granularity is one byte.
@@ -639,7 +643,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
* @converter: User-space visible type specific information.
* @id_loc: Pointer to the location in the command buffer currently being parsed
* from where the user-space resource id handle is located.
- * @p_val: Pointer to pointer to resource validalidation node. Populated on
+ * @p_res: Pointer to pointer to resource validation node. Populated on
* exit.
*/
static int
@@ -1700,7 +1704,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
- * @val_node: The validation node representing the resource.
+ * @res: Pointer to the resource.
* @buf_id: Pointer to the user-space backup buffer handle in the command
* stream.
* @backup_offset: Offset of backup into MOB.
@@ -3739,7 +3743,7 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
return 0;
}
-/**
+/*
* vmw_execbuf_fence_commands - create and submit a command stream fence
*
* Creates a fence object and submits a command stream marker.
@@ -3939,9 +3943,9 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
* On successful return, the function returns a pointer to the data in the
* command buffer and *@header is set to non-NULL.
*
- * If command buffers could not be used, the function will return the value of
- * @kernel_commands on function call. That value may be NULL. In that case, the
- * value of *@header will be set to NULL.
+ * @kernel_commands: If command buffers could not be used, the function will
+ * return the value of @kernel_commands on function call. That value may be
+ * NULL. In that case, the value of *@header will be set to NULL.
*
* If an error is encountered, the function will return a pointer error value.
* If the function is interrupted by a signal while sleeping, it will return
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 378ec7600154..23523eb3cac2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -58,13 +58,11 @@ struct vmw_user_fence {
/**
* struct vmw_event_fence_action - fence action that delivers a drm event.
*
- * @e: A struct drm_pending_event that controls the event delivery.
* @action: A struct vmw_fence_action to hook up to a fence.
+ * @event: A pointer to the pending event.
* @fence: A referenced pointer to the fence to keep it alive while @action
* hangs on it.
* @dev: Pointer to a struct drm_device so we can access the event stuff.
- * @kref: Both @e and @action has destructors, so we need to refcount.
- * @size: Size accounted for this object.
* @tv_sec: If non-null, the variable pointed to will be assigned
* current time tv_sec val when the fence signals.
* @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
@@ -87,7 +85,7 @@ fman_from_fence(struct vmw_fence_obj *fence)
return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}
-/**
+/*
* Note on fencing subsystem usage of irqs:
* Typically the vmw_fences_update function is called
*
@@ -250,7 +248,7 @@ static const struct dma_fence_ops vmw_fence_ops = {
};
-/**
+/*
* Execute signal actions on fences recently signaled.
* This is done from a workqueue so we don't have to execute
* signal actions from atomic context.
@@ -708,7 +706,7 @@ int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
}
-/**
+/*
* vmw_fence_fifo_down - signal all unsignaled fence objects.
*/
@@ -948,8 +946,8 @@ static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
/**
* vmw_fence_obj_add_action - Add an action to a fence object.
*
- * @fence - The fence object.
- * @action - The action to add.
+ * @fence: The fence object.
+ * @action: The action to add.
*
* Note that the action callbacks may be executed before this function
* returns.
@@ -1001,6 +999,10 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
* @fence: The fence object on which to post the event.
* @event: Event to be posted. This event should've been alloced
* using k[mz]alloc, and should've been completely initialized.
+ * @tv_sec: If non-null, the variable pointed to will be assigned
+ * current time tv_sec val when the fence signals.
+ * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
+ * be assigned the current time tv_usec val when the fence signals.
* @interruptible: Interruptible waits if possible.
*
* As a side effect, the object pointed to by @event may have been
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 80af8772b8c2..b36032964b2f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -437,7 +437,7 @@ __poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
* @filp: See the linux fops read documentation.
* @buffer: See the linux fops read documentation.
* @count: See the linux fops read documentation.
- * offset: See the linux fops read documentation.
+ * @offset: See the linux fops read documentation.
*
* Wrapper around the drm_read function that makes sure the device is
* processing the fifo if drm_read decides to wait.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 9a89f658e501..abbca8b0b3c5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -370,12 +370,16 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
void
vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
s32 hotspot_x, hotspot_y;
int ret = 0;
@@ -383,9 +387,9 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x = du->hotspot_x;
hotspot_y = du->hotspot_y;
- if (plane->state->fb) {
- hotspot_x += plane->state->fb->hot_x;
- hotspot_y += plane->state->fb->hot_y;
+ if (new_state->fb) {
+ hotspot_x += new_state->fb->hot_x;
+ hotspot_y += new_state->fb->hot_y;
}
du->cursor_surface = vps->surf;
@@ -400,8 +404,8 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_y);
} else if (vps->bo) {
ret = vmw_cursor_update_bo(dev_priv, vps->bo,
- plane->state->crtc_w,
- plane->state->crtc_h,
+ new_state->crtc_w,
+ new_state->crtc_h,
hotspot_x, hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
@@ -409,8 +413,8 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
}
if (!ret) {
- du->cursor_x = plane->state->crtc_x + du->set_gui_x;
- du->cursor_y = plane->state->crtc_y + du->set_gui_y;
+ du->cursor_x = new_state->crtc_x + du->set_gui_x;
+ du->cursor_y = new_state->crtc_y + du->set_gui_y;
vmw_cursor_update_position(dev_priv, true,
du->cursor_x + hotspot_x,
@@ -437,26 +441,28 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
* Returns 0 on success
*/
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state = NULL;
- struct drm_framebuffer *new_fb = state->fb;
+ struct drm_framebuffer *new_fb = new_state->fb;
int ret;
- if (state->crtc)
- crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
+ if (new_state->crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_state->crtc);
- ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, true);
if (!ret && new_fb) {
- struct drm_crtc *crtc = state->crtc;
- struct vmw_connector_state *vcs;
+ struct drm_crtc *crtc = new_state->crtc;
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
- vcs = vmw_connector_state_to_vcs(du->connector.state);
+ vmw_connector_state_to_vcs(du->connector.state);
}
@@ -468,7 +474,7 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
* vmw_du_cursor_plane_atomic_check - check if the new state is okay
*
* @plane: cursor plane
- * @state: info on the new plane state
+ * @new_state: info on the new plane state
*
* This is a chance to fail if the new cursor state does not fit
* our requirements.
@@ -476,8 +482,10 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
* Returns 0 on success
*/
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
int ret = 0;
struct drm_crtc_state *crtc_state = NULL;
struct vmw_surface *surface = NULL;
@@ -891,7 +899,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
int ret;
- struct drm_format_name_buf format_name;
/* 3D is only supported on HWv8 and newer hosts */
if (dev_priv->active_display_unit == vmw_du_legacy)
@@ -929,8 +936,8 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
format = SVGA3D_A1R5G5B5;
break;
default:
- DRM_ERROR("Invalid pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
+ DRM_ERROR("Invalid pixel format: %p4cc\n",
+ &mode_cmd->pixel_format);
return -EINVAL;
}
@@ -1058,7 +1065,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
.dirty = vmw_framebuffer_bo_dirty_ext,
};
-/**
+/*
* Pin the buffer in a location suitable for access by the
* display system.
*/
@@ -1145,7 +1152,6 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
uint32_t format;
struct vmw_resource *res;
unsigned int bytes_pp;
- struct drm_format_name_buf format_name;
int ret;
switch (mode_cmd->pixel_format) {
@@ -1167,8 +1173,8 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
break;
default:
- DRM_ERROR("Invalid framebuffer format %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
+ DRM_ERROR("Invalid framebuffer format %p4cc\n",
+ &mode_cmd->pixel_format);
return -EINVAL;
}
@@ -1212,7 +1218,6 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
struct drm_device *dev = &dev_priv->drm;
struct vmw_framebuffer_bo *vfbd;
unsigned int requested_size;
- struct drm_format_name_buf format_name;
int ret;
requested_size = mode_cmd->height * mode_cmd->pitches[0];
@@ -1232,8 +1237,8 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
case DRM_FORMAT_RGB565:
break;
default:
- DRM_ERROR("Invalid pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
+ DRM_ERROR("Invalid pixel format: %p4cc\n",
+ &mode_cmd->pixel_format);
return -EINVAL;
}
}
@@ -1268,6 +1273,7 @@ out_err1:
/**
* vmw_kms_srf_ok - check if a surface can be created
*
+ * @dev_priv: Pointer to device private struct.
* @width: requested width
* @height: requested height
*
@@ -1779,10 +1785,6 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv)
drm_property_create_range(&dev_priv->drm,
DRM_MODE_PROP_IMMUTABLE,
"hotplug_mode_update", 0, 1);
-
- if (!dev_priv->hotplug_mode_update_property)
- return;
-
}
int vmw_kms_init(struct vmw_private *dev_priv)
@@ -1897,7 +1899,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
}
-/**
+/*
* Function called by DRM code called with vbl_lock held.
*/
u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
@@ -1905,7 +1907,7 @@ u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
return 0;
}
-/**
+/*
* Function called by DRM code called with vbl_lock held.
*/
int vmw_enable_vblank(struct drm_crtc *crtc)
@@ -1913,7 +1915,7 @@ int vmw_enable_vblank(struct drm_crtc *crtc)
return -EINVAL;
}
-/**
+/*
* Function called by DRM code called with vbl_lock held.
*/
void vmw_disable_vblank(struct drm_crtc *crtc)
@@ -2057,6 +2059,10 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = {
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x720@60Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74500, 1280, 1344,
+ 1472, 1664, 0, 720, 723, 728, 748, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
@@ -2101,6 +2107,10 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = {
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@60Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 173000, 1920, 2048,
+ 2248, 2576, 0, 1080, 1083, 1088, 1120, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
@@ -2109,10 +2119,26 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = {
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1440@60Hz */
+ { DRM_MODE("2560x1440", DRM_MODE_TYPE_DRIVER, 241500, 2560, 2608,
+ 2640, 2720, 0, 1440, 1443, 1448, 1481, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2880x1800@60Hz */
+ { DRM_MODE("2880x1800", DRM_MODE_TYPE_DRIVER, 337500, 2880, 2928,
+ 2960, 3040, 0, 1800, 1803, 1809, 1852, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 3840x2160@60Hz */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 533000, 3840, 3888,
+ 3920, 4000, 0, 2160, 2163, 2168, 2222, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 3840x2400@60Hz */
+ { DRM_MODE("3840x2400", DRM_MODE_TYPE_DRIVER, 592250, 3840, 3888,
+ 3920, 4000, 0, 2400, 2403, 2409, 2469, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* Terminate */
{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};
@@ -2121,7 +2147,7 @@ static struct drm_display_mode vmw_kms_connector_builtin[] = {
* vmw_guess_mode_timing - Provide fake timings for a
* 60Hz vrefresh mode.
*
- * @mode - Pointer to a struct drm_display_mode with hdisplay and vdisplay
+ * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
* members filled in.
*/
void vmw_guess_mode_timing(struct drm_display_mode *mode)
@@ -2176,6 +2202,7 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
mode->hdisplay = du->pref_width;
mode->vdisplay = du->pref_height;
vmw_guess_mode_timing(mode);
+ drm_mode_set_name(mode);
if (vmw_kms_validate_mode_vram(dev_priv,
mode->hdisplay * assumed_bpp,
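
The plane-callback changes in vmwgfx_kms.c (and in the ldu/sou/stdu files below) follow the DRM-wide conversion in which atomic plane hooks receive the whole struct drm_atomic_state and look up their own old/new plane states. A minimal sketch of a callback after the conversion, using only the helpers already shown in the hunks above (my_plane_atomic_update() is illustrative):

static void my_plane_atomic_update(struct drm_plane *plane,
				   struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;

	if (!crtc || !new_state->fb)
		return;

	/* Program the hardware from new_state->fb and
	 * new_state->crtc_{x,y,w,h} here. */
}
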
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 03f3694015ce..bbc809f7bd8a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -245,7 +245,7 @@ struct vmw_framebuffer_bo {
};
-static const uint32_t vmw_primary_plane_formats[] = {
+static const uint32_t __maybe_unused vmw_primary_plane_formats[] = {
DRM_FORMAT_XRGB1555,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
@@ -253,7 +253,7 @@ static const uint32_t vmw_primary_plane_formats[] = {
DRM_FORMAT_ARGB8888,
};
-static const uint32_t vmw_cursor_plane_formats[] = {
+static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = {
DRM_FORMAT_ARGB8888,
};
@@ -456,11 +456,11 @@ void vmw_du_cursor_plane_destroy(struct drm_plane *plane);
/* Atomic Helpers */
int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state);
+ struct drm_atomic_state *state);
int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state);
+ struct drm_atomic_state *state);
void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state);
+ struct drm_atomic_state *state);
int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state);
void vmw_du_plane_cleanup_fb(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 9a9508edbc9e..87e0b303d900 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -49,7 +49,7 @@ struct vmw_legacy_display {
struct vmw_framebuffer *fb;
};
-/**
+/*
* Display unit using the legacy register interface.
*/
struct vmw_legacy_display_unit {
@@ -206,6 +206,7 @@ static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc)
* vmw_ldu_crtc_atomic_enable - Noop
*
* @crtc: CRTC associated with the new screen
+ * @state: Unused
*
* This is called after a mode set has been completed. Here's
* usually a good place to call vmw_ldu_add_active/vmw_ldu_del_active
@@ -221,6 +222,7 @@ static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc,
* vmw_ldu_crtc_atomic_disable - Turns off CRTC
*
* @crtc: CRTC to be turned off
+ * @state: Unused
*/
static void vmw_ldu_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
@@ -282,18 +284,22 @@ drm_connector_helper_funcs vmw_ldu_connector_helper_funcs = {
static void
vmw_ldu_primary_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct vmw_private *dev_priv;
struct vmw_legacy_display_unit *ldu;
struct vmw_framebuffer *vfb;
struct drm_framebuffer *fb;
- struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
+ struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc;
ldu = vmw_crtc_to_ldu(crtc);
dev_priv = vmw_priv(plane->dev);
- fb = plane->state->fb;
+ fb = new_state->fb;
vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index a372980fe6a5..a0b53141dded 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -277,6 +277,7 @@ out_no_setup:
&batch->otables[i]);
}
+ ttm_bo_unpin(batch->otable_bo);
ttm_bo_put(batch->otable_bo);
batch->otable_bo = NULL;
return ret;
@@ -342,6 +343,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
+ ttm_bo_unpin(batch->otable_bo);
ttm_bo_put(batch->otable_bo);
batch->otable_bo = NULL;
}
@@ -528,6 +530,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
void vmw_mob_destroy(struct vmw_mob *mob)
{
if (mob->pt_bo) {
+ ttm_bo_unpin(mob->pt_bo);
ttm_bo_put(mob->pt_bo);
mob->pt_bo = NULL;
}
@@ -643,6 +646,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
out_no_cmd_space:
vmw_fifo_resource_dec(dev_priv);
if (pt_set_up) {
+ ttm_bo_unpin(mob->pt_bo);
ttm_bo_put(mob->pt_bo);
mob->pt_bo = NULL;
}
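
The three vmwgfx_mob.c hunks add the same fix in different teardown paths: a buffer object pinned at creation must be unpinned before its final reference is dropped, otherwise TTM warns about a leaked pin count on free. As a standalone sketch of that idiom (my_release_pinned_bo() is illustrative, not from the patch):

static void my_release_pinned_bo(struct ttm_buffer_object **bo)
{
	if (!*bo)
		return;

	/* Balance the pin taken when the object was created ... */
	ttm_bo_unpin(*bo);
	/* ... then drop the last reference. */
	ttm_bo_put(*bo);
	*bo = NULL;
}
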
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 15b5bde69324..609269625468 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -253,7 +253,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
* vmw_send_msg: Sends a message to the host
*
* @channel: RPC channel
- * @logmsg: NULL terminated string
+ * @msg: NULL terminated string
*
* Returns: 0 on success
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index d6d282c13b7f..ac4a9b722279 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -42,7 +42,7 @@ struct vmw_stream {
struct drm_vmw_control_stream_arg saved;
};
-/**
+/*
* Overlay control
*/
struct vmw_overlay {
@@ -85,7 +85,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
cmd->flush.streamId = stream_id;
}
-/**
+/*
* Send put command to hw.
*
* Returns
@@ -174,7 +174,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
return 0;
}
-/**
+/*
* Send stop command to hw.
*
* Returns
@@ -216,7 +216,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
return 0;
}
-/**
+/*
* Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
*
* With the introduction of screen objects buffers could now be
@@ -235,7 +235,7 @@ static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
return vmw_bo_pin_in_vram_or_gmr(dev_priv, buf, inter);
}
-/**
+/*
* Stop or pause a stream.
*
* If the stream is paused the no evict flag is removed from the buffer
@@ -285,7 +285,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
return 0;
}
-/**
+/*
* Update a stream and send any put or stop fifo commands needed.
*
* The caller must hold the overlay lock.
@@ -353,7 +353,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
return 0;
}
-/**
+/*
* Try to resume all paused streams.
*
* Used by the kms code after moving a new scanout buffer to vram.
@@ -387,7 +387,7 @@ int vmw_overlay_resume_all(struct vmw_private *dev_priv)
return 0;
}
-/**
+/*
* Pauses all active streams.
*
* Used by the kms code when moving a new scanout buffer to vram.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index d1e7b9608145..c3a724e37104 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -202,7 +202,6 @@ int vmw_resource_alloc_id(struct vmw_resource *res)
*
* @dev_priv: Pointer to a device private struct.
* @res: The struct vmw_resource to initialize.
- * @obj_type: Resource object type.
* @delay_id: Boolean whether to defer device id allocation until
* the first validation.
* @res_free: Resource destructor.
@@ -288,8 +287,6 @@ out_bad_resource:
* @tfile: Pointer to a struct ttm_object_file identifying the caller
* @handle: The TTM user-space handle
* @converter: Pointer to an object describing the resource type
- * @p_res: On successful return the location pointed to will contain
- * a pointer to a refcounted struct vmw_resource.
*
* If the handle can't be found or is associated with an incorrect resource
* type, -EINVAL will be returned.
@@ -315,7 +312,7 @@ vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
return converter->base_obj_to_res(base);
}
-/**
+/*
* Helper function that looks up either a surface or a bo.
*
* The pointer this pointed at by out_surf and out_buf needs to be null.
@@ -388,6 +385,7 @@ out_no_bo:
* @res: The resource to make visible to the device.
* @val_buf: Information about a buffer possibly
* containing backup data if a bind operation is needed.
+ * @dirtying: Transfer dirty regions.
*
* On hardware resource shortage, this function returns -EBUSY and
* should be retried once resources have been freed up.
@@ -586,7 +584,7 @@ out_no_reserve:
return ret;
}
-/**
+/*
* vmw_resource_reserve - Reserve a resource for command submission
*
* @res: The resource to reserve.
@@ -858,7 +856,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem)
{
struct vmw_buffer_object *dx_query_mob;
- struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_device *bdev = bo->bdev;
struct vmw_private *dev_priv;
@@ -973,7 +971,7 @@ void vmw_resource_evict_all(struct vmw_private *dev_priv)
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
-/**
+/*
* vmw_resource_pin - Add a pin reference on a resource
*
* @res: The resource to add a pin reference on
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index b0db059b8cfb..9bc9a0714664 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -84,7 +84,7 @@ struct vmw_kms_sou_define_gmrfb {
SVGAFifoCmdDefineGMRFB body;
};
-/**
+/*
* Display unit using screen objects.
*/
struct vmw_screen_object_unit {
@@ -112,7 +112,7 @@ static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
vmw_sou_destroy(vmw_crtc_to_sou(crtc));
}
-/**
+/*
* Send the fifo command to create a screen.
*/
static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
@@ -160,7 +160,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
return 0;
}
-/**
+/*
* Send the fifo command to destroy a screen.
*/
static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
@@ -263,7 +263,7 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
/**
* vmw_sou_crtc_helper_prepare - Noop
*
- * @crtc: CRTC associated with the new screen
+ * @crtc: CRTC associated with the new screen
*
* Prepares the CRTC for a mode set, but we don't need to do anything here.
*/
@@ -275,6 +275,7 @@ static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
* vmw_sou_crtc_atomic_enable - Noop
*
* @crtc: CRTC associated with the new screen
+ * @state: Unused
*
* This is called after a mode set has been completed.
*/
@@ -287,6 +288,7 @@ static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
* vmw_sou_crtc_atomic_disable - Turns off CRTC
*
* @crtc: CRTC to be turned off
+ * @state: Unused
*/
static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
@@ -728,18 +730,20 @@ static int vmw_sou_plane_update_surface(struct vmw_private *dev_priv,
static void
vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct drm_crtc *crtc = plane->state->crtc;
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc *crtc = new_state->crtc;
struct drm_pending_vblank_event *event = NULL;
struct vmw_fence_obj *fence = NULL;
int ret;
/* In case of device error, maintain consistent atomic state */
- if (crtc && plane->state->fb) {
+ if (crtc && new_state->fb) {
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_framebuffer *vfb =
- vmw_framebuffer_to_vfb(plane->state->fb);
+ vmw_framebuffer_to_vfb(new_state->fb);
if (vfb->bo)
ret = vmw_sou_plane_update_bo(dev_priv, plane,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 905ae50aaa2a..a0db06564013 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -125,7 +125,7 @@ static const struct vmw_res_func vmw_dx_shader_func = {
.commit_notify = vmw_dx_shader_commit_notify,
};
-/**
+/*
* Shader management:
*/
@@ -654,7 +654,7 @@ out_resource_init:
-/**
+/*
* User-space shader management:
*/
@@ -686,7 +686,7 @@ static void vmw_shader_free(struct vmw_resource *res)
vmw_shader_size);
}
-/**
+/*
* This function is called when user space has no more references on the
* base object. It releases the base-object's reference on the resource object.
*/
@@ -945,13 +945,13 @@ int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man,
* vmw_compat_shader_add - Create a compat shader and stage it for addition
* as a command buffer managed resource.
*
+ * @dev_priv: Pointer to device private structure.
* @man: Pointer to the compat shader manager identifying the shader namespace.
* @user_key: The key that is used to identify the shader. The key is
* unique to the shader type.
* @bytecode: Pointer to the bytecode of the shader.
* @shader_type: Shader type.
- * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
- * to be created with.
+ * @size: Command size.
* @list: Caller's list of staged command buffer resource actions.
*
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 7369dd86d3a9..2877c7b43bd7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -42,6 +42,7 @@
/**
* struct vmw_view - view metadata
*
+ * @rcu: RCU callback head
* @res: The struct vmw_resource we derive from
* @ctx: Non-refcounted pointer to the context this view belongs to.
* @srf: Refcounted pointer to the surface pointed to by this view.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index fbe977881364..7b11f0285786 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -61,6 +61,7 @@ enum stdu_content_type {
* @bottom: Bottom side of bounding box.
* @fb_left: Left side of the framebuffer/content bounding box
* @fb_top: Top of the framebuffer/content bounding box
+ * @pitch: framebuffer pitch (stride)
* @buf: buffer object when DMA-ing between buffer and screen targets.
* @sid: Surface ID when copying between surface and screen targets.
*/
@@ -109,8 +110,11 @@ struct vmw_stdu_update_gb_image {
* content_vfbs dimensions, then this is a pointer into the
* corresponding field in content_vfbs. If not, then this
* is a separate buffer to which content_vfbs will blit to.
- * @content_type: content_fb type
- * @defined: true if the current display unit has been initialized
+ * @content_fb_type: content_fb type
+ * @display_width: display width
+ * @display_height: display height
+ * @defined: true if the current display unit has been initialized
+ * @cpp: Bytes per pixel
*/
struct vmw_screen_target_display_unit {
struct vmw_display_unit base;
@@ -652,6 +656,7 @@ out_cleanup:
* @file_priv: Pointer to a struct drm-file identifying the caller. May be
* set to NULL, but then @user_fence_rep must also be set to NULL.
* @vfb: Pointer to the buffer-object backed framebuffer.
+ * @user_fence_rep: User-space provided structure for fence information.
* @clips: Array of clip rects. Either @clips or @vclips must be NULL.
* @vclips: Alternate array of clip rects. Either @clips or @vclips must
* be NULL.
@@ -1575,10 +1580,12 @@ static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv,
*/
static void
vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
- struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
- struct drm_crtc *crtc = plane->state->crtc;
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
+ struct drm_crtc *crtc = new_state->crtc;
struct vmw_screen_target_display_unit *stdu;
struct drm_pending_vblank_event *event;
struct vmw_fence_obj *fence = NULL;
@@ -1586,9 +1593,9 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
int ret;
/* If case of device error, maintain consistent atomic state */
- if (crtc && plane->state->fb) {
+ if (crtc && new_state->fb) {
struct vmw_framebuffer *vfb =
- vmw_framebuffer_to_vfb(plane->state->fb);
+ vmw_framebuffer_to_vfb(new_state->fb);
stdu = vmw_crtc_to_stdu(crtc);
dev_priv = vmw_priv(crtc->dev);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index f6cab77075a0..c3e55c1376eb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -41,10 +41,12 @@
/**
* struct vmw_user_surface - User-space visible surface resource
*
+ * @prime: The TTM prime object.
* @base: The TTM base object handling user-space visibility.
* @srf: The surface metadata.
* @size: TTM accounting size for the surface.
- * @master: master of the creating client. Used for security check.
+ * @master: Master of the creating client. Used for security check.
+ * @backup_base: The TTM base object of the backup buffer.
*/
struct vmw_user_surface {
struct ttm_prime_object prime;
@@ -69,7 +71,7 @@ struct vmw_surface_offset {
};
/**
- * vmw_surface_dirty - Surface dirty-tracker
+ * struct vmw_surface_dirty - Surface dirty-tracker
* @cache: Cached layout information of the surface.
* @size: Accounting size for the struct vmw_surface_dirty.
* @num_subres: Number of subresources.
@@ -162,7 +164,7 @@ static const struct vmw_res_func vmw_gb_surface_func = {
.clean = vmw_surface_clean,
};
-/**
+/*
* struct vmw_surface_dma - SVGA3D DMA command
*/
struct vmw_surface_dma {
@@ -172,7 +174,7 @@ struct vmw_surface_dma {
SVGA3dCmdSurfaceDMASuffix suffix;
};
-/**
+/*
* struct vmw_surface_define - SVGA3D Surface Define command
*/
struct vmw_surface_define {
@@ -180,7 +182,7 @@ struct vmw_surface_define {
SVGA3dCmdDefineSurface body;
};
-/**
+/*
* struct vmw_surface_destroy - SVGA3D Surface Destroy command
*/
struct vmw_surface_destroy {
@@ -544,6 +546,7 @@ static int vmw_legacy_srf_bind(struct vmw_resource *res,
*
* @res: Pointer to a struct vmw_res embedded in a struct
* vmw_surface.
+ * @readback: Readback - only true if dirty
* @val_buf: Pointer to a struct ttm_validate_buffer containing
* information about the backup buffer.
*
@@ -1060,8 +1063,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
/**
* vmw_surface_define_encode - Encode a surface_define command.
*
- * @srf: Pointer to a struct vmw_surface object.
- * @cmd_space: Pointer to memory area in which the commands should be encoded.
+ * @res: Pointer to a struct vmw_resource embedded in a struct
+ * vmw_surface.
*/
static int vmw_gb_surface_create(struct vmw_resource *res)
{
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index e8e79de255cf..eb63cbe64909 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -11,6 +11,7 @@
/**
* struct vmw_thp_manager - Range manager implementing huge page alignment
*
+ * @manager: TTM resource manager.
* @mm: The underlying range manager. Protected by @lock.
* @lock: Manager lock.
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index dbb068830d80..63f10c865061 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -265,6 +265,7 @@ static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
*
* @viter: Pointer to the iterator to initialize
* @vsgt: Pointer to a struct vmw_sg_table to initialize from
+ * @p_offset: Pointer offset used to update current array position
*
* Note that we're following the convention of __sg_page_iter_start, so that
* the iterator doesn't point to a valid page after initialization; it has
@@ -482,7 +483,7 @@ const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
}
-static int vmw_ttm_bind(struct ttm_bo_device *bdev,
+static int vmw_ttm_bind(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
struct vmw_ttm_tt *vmw_be =
@@ -526,7 +527,7 @@ static int vmw_ttm_bind(struct ttm_bo_device *bdev,
return ret;
}
-static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
+static void vmw_ttm_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
@@ -552,7 +553,7 @@ static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
}
-static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm);
@@ -572,21 +573,42 @@ static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
}
-static int vmw_ttm_populate(struct ttm_bo_device *bdev,
+static int vmw_ttm_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
+ unsigned int i;
+ int ret;
+
/* TODO: maybe completely drop this ? */
if (ttm_tt_is_populated(ttm))
return 0;
- return ttm_pool_alloc(&bdev->pool, ttm, ctx);
+ ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ ret = ttm_mem_global_alloc_page(&ttm_mem_glob, ttm->pages[i],
+ PAGE_SIZE, ctx);
+ if (ret)
+ goto error;
+ }
+ return 0;
+
+error:
+ while (i--)
+ ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
+ PAGE_SIZE);
+ ttm_pool_free(&bdev->pool, ttm);
+ return ret;
}
-static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
+static void vmw_ttm_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
dma_ttm);
+ unsigned int i;
if (vmw_tt->mob) {
vmw_mob_destroy(vmw_tt->mob);
@@ -594,6 +616,11 @@ static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
}
vmw_ttm_unmap_dma(vmw_tt);
+
+ for (i = 0; i < ttm->num_pages; ++i)
+ ttm_mem_global_free_page(&ttm_mem_glob, ttm->pages[i],
+ PAGE_SIZE);
+
ttm_pool_free(&bdev->pool, ttm);
}
@@ -639,7 +666,7 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
return vmw_user_bo_verify_access(bo, tfile);
}
-static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
+static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
@@ -664,6 +691,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resourc
* vmw_move_notify - TTM move_notify_callback
*
* @bo: The TTM buffer object about to move.
+ * @evict: Unused
* @mem: The struct ttm_resource indicating to what memory
* region the move is taking place.
*
@@ -742,7 +770,7 @@ vmw_delete_mem_notify(struct ttm_buffer_object *bo)
vmw_move_notify(bo, false, NULL);
}
-struct ttm_bo_driver vmw_bo_driver = {
+struct ttm_device_funcs vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &vmw_ttm_populate,
.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index f2e2bf6d1421..e7570f422400 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -48,7 +48,6 @@ struct vmw_validation_bo_node {
u32 as_mob : 1;
u32 cpu_blit : 1;
};
-
/**
* struct vmw_validation_res_node - Resource validation metadata.
* @head: List head for the resource validation list.
@@ -64,6 +63,8 @@ struct vmw_validation_bo_node {
* @first_usage: True iff the resource has been seen only once in the current
* validation batch.
* @reserved: Whether the resource is currently reserved by this process.
+ * @dirty_set: Change dirty status of the resource.
+ * @dirty: Dirty information VMW_RES_DIRTY_XX.
* @private: Optionally additional memory for caller-private data.
*
* Bit fields are used since these structures are allocated and freed in
@@ -205,7 +206,7 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
* vmw_validation_find_res_dup - Find a duplicate resource entry in the
* validation context's lists.
* @ctx: The validation context to search.
- * @vbo: The buffer object to search for.
+ * @res: Reference counted resource pointer.
*
* Return: Pointer to the struct vmw_validation_bo_node referencing the
* duplicate, or NULL if none found.
diff --git a/drivers/gpu/drm/xen/Kconfig b/drivers/gpu/drm/xen/Kconfig
index fab1373e1fb3..7eb0ce345547 100644
--- a/drivers/gpu/drm/xen/Kconfig
+++ b/drivers/gpu/drm/xen/Kconfig
@@ -1,15 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_XEN
- bool "DRM Support for Xen guest OS"
- depends on XEN
- help
- Choose this option if you want to enable DRM support
- for Xen.
+ bool
config DRM_XEN_FRONTEND
tristate "Para-virtualized frontend driver for Xen guest OS"
- depends on DRM_XEN
- depends on DRM
+ depends on XEN && DRM
+ select DRM_XEN
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select XEN_XENBUS_FRONTEND
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index ef11b1e4de39..371202ebe900 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -13,6 +13,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -301,7 +302,7 @@ static const struct drm_simple_display_pipe_funcs display_funcs = {
.mode_valid = display_mode_valid,
.enable = display_enable,
.disable = display_disable,
- .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
.check = display_check,
.update = display_update,
};
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index 148add0ca1d6..109d627968ac 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -1143,18 +1143,21 @@ static inline struct zynqmp_disp_layer *plane_to_layer(struct drm_plane *plane)
static int
zynqmp_disp_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_crtc_state *crtc_state;
- if (!state->crtc)
+ if (!new_plane_state->crtc)
return 0;
- crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- return drm_atomic_helper_check_plane_state(state, crtc_state,
+ return drm_atomic_helper_check_plane_state(new_plane_state,
+ crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, false);
@@ -1162,8 +1165,10 @@ zynqmp_disp_plane_atomic_check(struct drm_plane *plane,
static void
zynqmp_disp_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct zynqmp_disp_layer *layer = plane_to_layer(plane);
if (!old_state->fb)
@@ -1174,13 +1179,15 @@ zynqmp_disp_plane_atomic_disable(struct drm_plane *plane,
static void
zynqmp_disp_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
struct zynqmp_disp_layer *layer = plane_to_layer(plane);
bool format_changed = false;
if (!old_state->fb ||
- old_state->fb->format->format != plane->state->fb->format->format)
+ old_state->fb->format->format != new_state->fb->format->format)
format_changed = true;
/*
@@ -1192,10 +1199,10 @@ zynqmp_disp_plane_atomic_update(struct drm_plane *plane,
if (old_state->fb)
zynqmp_disp_layer_disable(layer);
- zynqmp_disp_layer_set_format(layer, plane->state);
+ zynqmp_disp_layer_set_format(layer, new_state);
}
- zynqmp_disp_layer_update(layer, plane->state);
+ zynqmp_disp_layer_update(layer, new_state);
	/* Enable or re-enable the plane if the format has changed. */
if (format_changed)
@@ -1472,8 +1479,6 @@ static void
zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
- struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
- crtc);
struct zynqmp_disp *disp = crtc_to_disp(crtc);
struct drm_plane_state *old_plane_state;
@@ -1482,10 +1487,9 @@ zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
* .shutdown() path if the plane is already disabled, skip
* zynqmp_disp_plane_atomic_disable() in that case.
*/
- old_plane_state = drm_atomic_get_old_plane_state(old_crtc_state->state,
- crtc->primary);
+ old_plane_state = drm_atomic_get_old_plane_state(state, crtc->primary);
if (old_plane_state)
- zynqmp_disp_plane_atomic_disable(crtc->primary, old_plane_state);
+ zynqmp_disp_plane_atomic_disable(crtc->primary, state);
zynqmp_disp_disable(disp);
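
The zynqmp conversion above (and the zx_plane one that follows) uses the recipe applied throughout this series: plane helper hooks now receive the full struct drm_atomic_state and look up their own old/new plane state instead of being handed a single drm_plane_state. A minimal sketch of the converted shape, with foo_* as purely illustrative names:

static void foo_plane_atomic_update(struct drm_plane *plane,
				    struct drm_atomic_state *state)
{
	/* Fetch this plane's states as tracked in the overall commit. */
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	if (!new_state->fb)
		return;

	/* Program the hardware from new_state rather than plane->state. */
	foo_hw_program(plane, old_state, new_state);
}
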
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index 78d787afe594..93bcca428e35 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -46,8 +46,10 @@ static const uint32_t vl_formats[] = {
#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
static int zx_vl_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *plane_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_crtc *crtc = plane_state->crtc;
struct drm_crtc_state *crtc_state;
@@ -57,7 +59,7 @@ static int zx_vl_plane_atomic_check(struct drm_plane *plane,
if (!crtc || WARN_ON(!fb))
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
@@ -179,13 +181,14 @@ static void zx_vl_rsz_setup(struct zx_plane *zplane, uint32_t format,
}
static void zx_vl_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
struct zx_plane *zplane = to_zx_plane(plane);
- struct drm_plane_state *state = plane->state;
- struct drm_framebuffer *fb = state->fb;
- struct drm_rect *src = &state->src;
- struct drm_rect *dst = &state->dst;
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
+ struct drm_framebuffer *fb = new_state->fb;
+ struct drm_rect *src = &new_state->src;
+ struct drm_rect *dst = &new_state->dst;
struct drm_gem_cma_object *cma_obj;
void __iomem *layer = zplane->layer;
void __iomem *hbsc = zplane->hbsc;
@@ -257,8 +260,10 @@ static void zx_vl_plane_atomic_update(struct drm_plane *plane,
}
static void zx_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
+ plane);
struct zx_plane *zplane = to_zx_plane(plane);
void __iomem *hbsc = zplane->hbsc;
@@ -275,8 +280,10 @@ static const struct drm_plane_helper_funcs zx_vl_plane_helper_funcs = {
};
static int zx_gl_plane_atomic_check(struct drm_plane *plane,
- struct drm_plane_state *plane_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_crtc *crtc = plane_state->crtc;
struct drm_crtc_state *crtc_state;
@@ -284,7 +291,7 @@ static int zx_gl_plane_atomic_check(struct drm_plane *plane,
if (!crtc || WARN_ON(!fb))
return 0;
- crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
+ crtc_state = drm_atomic_get_existing_crtc_state(state,
crtc);
if (WARN_ON(!crtc_state))
return -EINVAL;
@@ -347,10 +354,12 @@ static void zx_gl_rsz_setup(struct zx_plane *zplane, u32 src_w, u32 src_h,
}
static void zx_gl_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
+ plane);
struct zx_plane *zplane = to_zx_plane(plane);
- struct drm_framebuffer *fb = plane->state->fb;
+ struct drm_framebuffer *fb = new_state->fb;
struct drm_gem_cma_object *cma_obj;
void __iomem *layer = zplane->layer;
void __iomem *csc = zplane->csc;
@@ -369,15 +378,15 @@ static void zx_gl_plane_atomic_update(struct drm_plane *plane,
format = fb->format->format;
stride = fb->pitches[0];
- src_x = plane->state->src_x >> 16;
- src_y = plane->state->src_y >> 16;
- src_w = plane->state->src_w >> 16;
- src_h = plane->state->src_h >> 16;
+ src_x = new_state->src_x >> 16;
+ src_y = new_state->src_y >> 16;
+ src_w = new_state->src_w >> 16;
+ src_h = new_state->src_h >> 16;
- dst_x = plane->state->crtc_x;
- dst_y = plane->state->crtc_y;
- dst_w = plane->state->crtc_w;
- dst_h = plane->state->crtc_h;
+ dst_x = new_state->crtc_x;
+ dst_y = new_state->crtc_y;
+ dst_w = new_state->crtc_w;
+ dst_h = new_state->crtc_h;
bpp = fb->format->cpp[0];
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 31d1342e61e8..31662c3a8c9e 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -265,13 +265,9 @@ static void v4l_print_fmtdesc(const void *arg, bool write_only)
{
const struct v4l2_fmtdesc *p = arg;
- pr_cont("index=%u, type=%s, flags=0x%x, pixelformat=%c%c%c%c, mbus_code=0x%04x, description='%.*s'\n",
+ pr_cont("index=%u, type=%s, flags=0x%x, pixelformat=%p4cc, mbus_code=0x%04x, description='%.*s'\n",
p->index, prt_names(p->type, v4l2_type_names),
- p->flags, (p->pixelformat & 0xff),
- (p->pixelformat >> 8) & 0xff,
- (p->pixelformat >> 16) & 0xff,
- (p->pixelformat >> 24) & 0xff,
- p->mbus_code,
+ p->flags, &p->pixelformat, p->mbus_code,
(int)sizeof(p->description), p->description);
}
@@ -293,12 +289,8 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
pix = &p->fmt.pix;
- pr_cont(", width=%u, height=%u, pixelformat=%c%c%c%c, field=%s, bytesperline=%u, sizeimage=%u, colorspace=%d, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
- pix->width, pix->height,
- (pix->pixelformat & 0xff),
- (pix->pixelformat >> 8) & 0xff,
- (pix->pixelformat >> 16) & 0xff,
- (pix->pixelformat >> 24) & 0xff,
+ pr_cont(", width=%u, height=%u, pixelformat=%p4cc, field=%s, bytesperline=%u, sizeimage=%u, colorspace=%d, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
+ pix->width, pix->height, &pix->pixelformat,
prt_names(pix->field, v4l2_field_names),
pix->bytesperline, pix->sizeimage,
pix->colorspace, pix->flags, pix->ycbcr_enc,
@@ -307,12 +299,8 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
mp = &p->fmt.pix_mp;
- pr_cont(", width=%u, height=%u, format=%c%c%c%c, field=%s, colorspace=%d, num_planes=%u, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
- mp->width, mp->height,
- (mp->pixelformat & 0xff),
- (mp->pixelformat >> 8) & 0xff,
- (mp->pixelformat >> 16) & 0xff,
- (mp->pixelformat >> 24) & 0xff,
+ pr_cont(", width=%u, height=%u, format=%p4cc, field=%s, colorspace=%d, num_planes=%u, flags=0x%x, ycbcr_enc=%u, quantization=%u, xfer_func=%u\n",
+ mp->width, mp->height, &mp->pixelformat,
prt_names(mp->field, v4l2_field_names),
mp->colorspace, mp->num_planes, mp->flags,
mp->ycbcr_enc, mp->quantization, mp->xfer_func);
@@ -337,13 +325,9 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_VBI_CAPTURE:
case V4L2_BUF_TYPE_VBI_OUTPUT:
vbi = &p->fmt.vbi;
- pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, sample_format=%c%c%c%c, start=%u,%u, count=%u,%u\n",
+ pr_cont(", sampling_rate=%u, offset=%u, samples_per_line=%u, sample_format=%p4cc, start=%u,%u, count=%u,%u\n",
vbi->sampling_rate, vbi->offset,
- vbi->samples_per_line,
- (vbi->sample_format & 0xff),
- (vbi->sample_format >> 8) & 0xff,
- (vbi->sample_format >> 16) & 0xff,
- (vbi->sample_format >> 24) & 0xff,
+ vbi->samples_per_line, &vbi->sample_format,
vbi->start[0], vbi->start[1],
vbi->count[0], vbi->count[1]);
break;
@@ -360,21 +344,13 @@ static void v4l_print_format(const void *arg, bool write_only)
case V4L2_BUF_TYPE_SDR_CAPTURE:
case V4L2_BUF_TYPE_SDR_OUTPUT:
sdr = &p->fmt.sdr;
- pr_cont(", pixelformat=%c%c%c%c\n",
- (sdr->pixelformat >> 0) & 0xff,
- (sdr->pixelformat >> 8) & 0xff,
- (sdr->pixelformat >> 16) & 0xff,
- (sdr->pixelformat >> 24) & 0xff);
+ pr_cont(", pixelformat=%p4cc\n", &sdr->pixelformat);
break;
case V4L2_BUF_TYPE_META_CAPTURE:
case V4L2_BUF_TYPE_META_OUTPUT:
meta = &p->fmt.meta;
- pr_cont(", dataformat=%c%c%c%c, buffersize=%u\n",
- (meta->dataformat >> 0) & 0xff,
- (meta->dataformat >> 8) & 0xff,
- (meta->dataformat >> 16) & 0xff,
- (meta->dataformat >> 24) & 0xff,
- meta->buffersize);
+ pr_cont(", dataformat=%p4cc, buffersize=%u\n",
+ &meta->dataformat, meta->buffersize);
break;
}
}
@@ -383,15 +359,10 @@ static void v4l_print_framebuffer(const void *arg, bool write_only)
{
const struct v4l2_framebuffer *p = arg;
- pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, height=%u, pixelformat=%c%c%c%c, bytesperline=%u, sizeimage=%u, colorspace=%d\n",
- p->capability, p->flags, p->base,
- p->fmt.width, p->fmt.height,
- (p->fmt.pixelformat & 0xff),
- (p->fmt.pixelformat >> 8) & 0xff,
- (p->fmt.pixelformat >> 16) & 0xff,
- (p->fmt.pixelformat >> 24) & 0xff,
- p->fmt.bytesperline, p->fmt.sizeimage,
- p->fmt.colorspace);
+ pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, height=%u, pixelformat=%p4cc, bytesperline=%u, sizeimage=%u, colorspace=%d\n",
+ p->capability, p->flags, p->base, p->fmt.width, p->fmt.height,
+ &p->fmt.pixelformat, p->fmt.bytesperline, p->fmt.sizeimage,
+ p->fmt.colorspace);
}
static void v4l_print_buftype(const void *arg, bool write_only)
@@ -761,13 +732,8 @@ static void v4l_print_frmsizeenum(const void *arg, bool write_only)
{
const struct v4l2_frmsizeenum *p = arg;
- pr_cont("index=%u, pixelformat=%c%c%c%c, type=%u",
- p->index,
- (p->pixel_format & 0xff),
- (p->pixel_format >> 8) & 0xff,
- (p->pixel_format >> 16) & 0xff,
- (p->pixel_format >> 24) & 0xff,
- p->type);
+ pr_cont("index=%u, pixelformat=%p4cc, type=%u",
+ p->index, &p->pixel_format, p->type);
switch (p->type) {
case V4L2_FRMSIZE_TYPE_DISCRETE:
pr_cont(", wxh=%ux%u\n",
@@ -793,13 +759,8 @@ static void v4l_print_frmivalenum(const void *arg, bool write_only)
{
const struct v4l2_frmivalenum *p = arg;
- pr_cont("index=%u, pixelformat=%c%c%c%c, wxh=%ux%u, type=%u",
- p->index,
- (p->pixel_format & 0xff),
- (p->pixel_format >> 8) & 0xff,
- (p->pixel_format >> 16) & 0xff,
- (p->pixel_format >> 24) & 0xff,
- p->width, p->height, p->type);
+ pr_cont("index=%u, pixelformat=%p4cc, wxh=%ux%u, type=%u",
+ p->index, &p->pixel_format, p->width, p->height, p->type);
switch (p->type) {
case V4L2_FRMIVAL_TYPE_DISCRETE:
pr_cont(", fps=%d/%d\n",
@@ -1459,12 +1420,8 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
return;
WARN(1, "Unknown pixelformat 0x%08x\n", fmt->pixelformat);
flags = 0;
- snprintf(fmt->description, sz, "%c%c%c%c%s",
- (char)(fmt->pixelformat & 0x7f),
- (char)((fmt->pixelformat >> 8) & 0x7f),
- (char)((fmt->pixelformat >> 16) & 0x7f),
- (char)((fmt->pixelformat >> 24) & 0x7f),
- (fmt->pixelformat & (1UL << 31)) ? "-BE" : "");
+ snprintf(fmt->description, sz, "%p4cc",
+ &fmt->pixelformat);
break;
}
}
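
All of these v4l2-ioctl hunks replace open-coded FourCC byte shifting with the %p4cc printk specifier, which takes a pointer to the 32-bit format word. A hedged usage sketch (the variable name is illustrative):

	u32 pixelformat = V4L2_PIX_FMT_YUYV;

	/* %p4cc prints the fourcc characters (and endianness) for us. */
	pr_info("format=%p4cc\n", &pixelformat);
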
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c
index 33595cc4778e..9ec969e136bf 100644
--- a/drivers/video/fbdev/amba-clcd.c
+++ b/drivers/video/fbdev/amba-clcd.c
@@ -35,19 +35,6 @@
/* This is limited to 16 characters when displayed by X startup */
static const char *clcd_name = "CLCD FB";
-/*
- * Unfortunately, the enable/disable functions may be called either from
- * process or IRQ context, and we _need_ to delay. This is _not_ good.
- */
-static inline void clcdfb_sleep(unsigned int ms)
-{
- if (in_atomic()) {
- mdelay(ms);
- } else {
- msleep(ms);
- }
-}
-
static inline void clcdfb_set_start(struct clcd_fb *fb)
{
unsigned long ustart = fb->fb.fix.smem_start;
@@ -77,7 +64,7 @@ static void clcdfb_disable(struct clcd_fb *fb)
val &= ~CNTL_LCDPWR;
writel(val, fb->regs + fb->off_cntl);
- clcdfb_sleep(20);
+ msleep(20);
}
if (val & CNTL_LCDEN) {
val &= ~CNTL_LCDEN;
@@ -109,7 +96,7 @@ static void clcdfb_enable(struct clcd_fb *fb, u32 cntl)
cntl |= CNTL_LCDEN;
writel(cntl, fb->regs + fb->off_cntl);
- clcdfb_sleep(20);
+ msleep(20);
/*
* and now apply power.
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index b80ba3d2a9b8..f58a545b3bf3 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -16,6 +16,7 @@
#include <linux/platform_device.h>
#include <linux/printk.h>
#include <linux/screen_info.h>
+#include <linux/pm_runtime.h>
#include <video/vga.h>
#include <asm/efi.h>
#include <drm/drm_utils.h> /* For drm_get_panel_orientation_quirk */
@@ -574,6 +575,7 @@ static int efifb_probe(struct platform_device *dev)
goto err_fb_dealoc;
}
fb_info(info, "%s frame buffer device\n", info->fix.id);
+ pm_runtime_get_sync(&efifb_pci_dev->dev);
return 0;
err_fb_dealoc:
@@ -600,6 +602,7 @@ static int efifb_remove(struct platform_device *pdev)
unregister_framebuffer(info);
sysfs_remove_groups(&pdev->dev.kobj, efifb_groups);
framebuffer_release(info);
+ pm_runtime_put(&efifb_pci_dev->dev);
return 0;
}
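
The efifb change above holds a runtime-PM reference on the PCI device that owns the firmware framebuffer while efifb is bound, so the device is not autosuspended underneath the console; the reference is dropped again on remove. The underlying pattern, sketched against a generic struct device pointer:

	/* probe: keep the device active while we scan out of its memory */
	pm_runtime_get_sync(dev);

	/* remove: balanced release once the framebuffer is gone */
	pm_runtime_put(dev);
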
diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
index cfe63932f825..b191bef22d98 100644
--- a/drivers/video/fbdev/omap/hwa742.c
+++ b/drivers/video/fbdev/omap/hwa742.c
@@ -100,6 +100,14 @@ struct {
struct hwa742_request req_pool[REQ_POOL_SIZE];
struct list_head pending_req_list;
struct list_head free_req_list;
+
+ /*
+	 * @req_lock: protects the request slot pool and its tracking lists
+ * @req_sema: counter; slot allocators from task contexts must
+ * push it down before acquiring a slot. This
+ * guarantees that atomic contexts will always have
+ * a minimum of IRQ_REQ_POOL_SIZE slots available.
+ */
struct semaphore req_sema;
spinlock_t req_lock;
@@ -224,13 +232,13 @@ static void disable_tearsync(void)
hwa742_write_reg(HWA742_NDP_CTRL, b);
}
-static inline struct hwa742_request *alloc_req(void)
+static inline struct hwa742_request *alloc_req(bool can_sleep)
{
unsigned long flags;
struct hwa742_request *req;
int req_flags = 0;
- if (!in_interrupt())
+ if (can_sleep)
down(&hwa742.req_sema);
else
req_flags = REQ_FROM_IRQ_POOL;
@@ -399,8 +407,8 @@ static void send_frame_complete(void *data)
hwa742.int_ctrl->enable_plane(OMAPFB_PLANE_GFX, 0);
}
-#define ADD_PREQ(_x, _y, _w, _h) do { \
- req = alloc_req(); \
+#define ADD_PREQ(_x, _y, _w, _h, can_sleep) do {\
+ req = alloc_req(can_sleep); \
req->handler = send_frame_handler; \
req->complete = send_frame_complete; \
req->par.update.x = _x; \
@@ -413,7 +421,8 @@ static void send_frame_complete(void *data)
} while(0)
static void create_req_list(struct omapfb_update_window *win,
- struct list_head *req_head)
+ struct list_head *req_head,
+ bool can_sleep)
{
struct hwa742_request *req;
int x = win->x;
@@ -427,7 +436,7 @@ static void create_req_list(struct omapfb_update_window *win,
color_mode = win->format & OMAPFB_FORMAT_MASK;
if (x & 1) {
- ADD_PREQ(x, y, 1, height);
+ ADD_PREQ(x, y, 1, height, can_sleep);
width--;
x++;
flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
@@ -439,19 +448,19 @@ static void create_req_list(struct omapfb_update_window *win,
if (xspan * height * 2 > hwa742.max_transmit_size) {
yspan = hwa742.max_transmit_size / (xspan * 2);
- ADD_PREQ(x, ystart, xspan, yspan);
+ ADD_PREQ(x, ystart, xspan, yspan, can_sleep);
ystart += yspan;
yspan = height - yspan;
flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
}
- ADD_PREQ(x, ystart, xspan, yspan);
+ ADD_PREQ(x, ystart, xspan, yspan, can_sleep);
x += xspan;
width -= xspan;
flags &= ~OMAPFB_FORMAT_FLAG_TEARSYNC;
}
if (width)
- ADD_PREQ(x, y, 1, height);
+ ADD_PREQ(x, y, 1, height, can_sleep);
}
static void auto_update_complete(void *data)
@@ -461,12 +470,12 @@ static void auto_update_complete(void *data)
jiffies + HWA742_AUTO_UPDATE_TIME);
}
-static void hwa742_update_window_auto(struct timer_list *unused)
+static void __hwa742_update_window_auto(bool can_sleep)
{
LIST_HEAD(req_list);
struct hwa742_request *last;
- create_req_list(&hwa742.auto_update_window, &req_list);
+ create_req_list(&hwa742.auto_update_window, &req_list, can_sleep);
last = list_entry(req_list.prev, struct hwa742_request, entry);
last->complete = auto_update_complete;
@@ -475,6 +484,11 @@ static void hwa742_update_window_auto(struct timer_list *unused)
submit_req_list(&req_list);
}
+static void hwa742_update_window_auto(struct timer_list *unused)
+{
+ __hwa742_update_window_auto(false);
+}
+
int hwa742_update_window_async(struct fb_info *fbi,
struct omapfb_update_window *win,
void (*complete_callback)(void *arg),
@@ -497,7 +511,7 @@ int hwa742_update_window_async(struct fb_info *fbi,
goto out;
}
- create_req_list(win, &req_list);
+ create_req_list(win, &req_list, true);
last = list_entry(req_list.prev, struct hwa742_request, entry);
last->complete = complete_callback;
@@ -544,7 +558,7 @@ static void hwa742_sync(void)
struct hwa742_request *req;
struct completion comp;
- req = alloc_req();
+ req = alloc_req(true);
req->handler = sync_handler;
req->complete = NULL;
@@ -599,7 +613,7 @@ static int hwa742_set_update_mode(enum omapfb_update_mode mode)
omapfb_notify_clients(hwa742.fbdev, OMAPFB_EVENT_READY);
break;
case OMAPFB_AUTO_UPDATE:
- hwa742_update_window_auto(0);
+ __hwa742_update_window_auto(true);
break;
case OMAPFB_UPDATE_DISABLED:
break;
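
The hwa742 rework drops the in_interrupt() heuristic: callers now state explicitly whether they may sleep, and only sleeping callers take the semaphore guarding the non-IRQ share of the request pool. A compressed sketch of the idea, not the driver's exact code (foo_* names are placeholders):

static struct foo_request *foo_alloc_req(bool can_sleep)
{
	int req_flags = 0;

	if (can_sleep)
		down(&foo.req_sema);		/* may block: task context only */
	else
		req_flags = REQ_FROM_IRQ_POOL;	/* dip into the reserved IRQ slots */

	return foo_take_slot(req_flags);
}
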
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index daa313f14335..d43b081d592f 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -2376,8 +2376,6 @@ static int dsi_sync_vc(struct platform_device *dsidev, int channel)
WARN_ON(!dsi_bus_is_locked(dsidev));
- WARN_ON(in_interrupt());
-
if (!dsi_vc_is_enabled(dsidev, channel))
return 0;
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 533a047d07a2..62f0ded70681 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -477,9 +477,8 @@ static int simplefb_probe(struct platform_device *pdev)
simplefb_clocks_enable(par, pdev);
simplefb_regulators_enable(par, pdev);
- dev_info(&pdev->dev, "framebuffer at 0x%lx, 0x%x bytes, mapped to 0x%p\n",
- info->fix.smem_start, info->fix.smem_len,
- info->screen_base);
+ dev_info(&pdev->dev, "framebuffer at 0x%lx, 0x%x bytes\n",
+ info->fix.smem_start, info->fix.smem_len);
dev_info(&pdev->dev, "format=%s, mode=%dx%dx%d, linelength=%d\n",
params.format->name,
info->var.xres, info->var.yres,
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index ce7023e9115d..ac5a28eff2c8 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -66,6 +66,8 @@
*
* For an implementation of how to use this look at
* drm_atomic_helper_setup_commit() from the atomic helper library.
+ *
+ * See also drm_crtc_commit_wait().
*/
struct drm_crtc_commit {
/**
@@ -436,6 +438,8 @@ static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit)
kref_put(&commit->ref, __drm_crtc_commit_free);
}
+int drm_crtc_commit_wait(struct drm_crtc_commit *commit);
+
struct drm_atomic_state * __must_check
drm_atomic_state_alloc(struct drm_device *dev);
void drm_atomic_state_clear(struct drm_atomic_state *state);
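
drm_crtc_commit_wait() is the new helper declared above; it blocks until the commit's hardware and flip-done stages have signalled and returns 0 on success or a negative error code on timeout, replacing open-coded waits on the commit completions. A hedged call-site sketch (dev and old_crtc_state are assumed to be in scope):

	struct drm_crtc_commit *commit = old_crtc_state->commit;
	int ret;

	if (commit) {
		ret = drm_crtc_commit_wait(commit);
		if (ret)
			drm_err(dev, "previous commit did not finish: %d\n", ret);
	}
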
diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h
new file mode 100644
index 000000000000..cfc5adee3d13
--- /dev/null
+++ b/include/drm/drm_gem_atomic_helper.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __DRM_GEM_ATOMIC_HELPER_H__
+#define __DRM_GEM_ATOMIC_HELPER_H__
+
+#include <linux/dma-buf-map.h>
+
+#include <drm/drm_plane.h>
+
+struct drm_simple_display_pipe;
+
+/*
+ * Plane Helpers
+ */
+
+int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state);
+int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
+/*
+ * Helpers for planes with shadow buffers
+ */
+
+/**
+ * struct drm_shadow_plane_state - plane state for planes with shadow buffers
+ *
+ * For planes that use a shadow buffer, struct drm_shadow_plane_state
+ * provides the regular plane state plus mappings of the shadow buffer
+ * into kernel address space.
+ */
+struct drm_shadow_plane_state {
+ /** @base: plane state */
+ struct drm_plane_state base;
+
+ /* Transitional state - do not export or duplicate */
+
+ /**
+	 * @map: Mappings of the plane's framebuffer BOs into kernel address space
+ *
+ * The memory mappings stored in map should be established in the plane's
+ * prepare_fb callback and removed in the cleanup_fb callback.
+ */
+ struct dma_buf_map map[4];
+};
+
+/**
+ * to_drm_shadow_plane_state - upcasts from struct drm_plane_state
+ * @state: the plane state
+ */
+static inline struct drm_shadow_plane_state *
+to_drm_shadow_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct drm_shadow_plane_state, base);
+}
+
+void drm_gem_reset_shadow_plane(struct drm_plane *plane);
+struct drm_plane_state *drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane);
+void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
+ struct drm_plane_state *plane_state);
+
+/**
+ * DRM_GEM_SHADOW_PLANE_FUNCS -
+ * Initializes struct drm_plane_funcs for shadow-buffered planes
+ *
+ * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This
+ * macro initializes struct drm_plane_funcs to use the respective helper functions.
+ */
+#define DRM_GEM_SHADOW_PLANE_FUNCS \
+ .reset = drm_gem_reset_shadow_plane, \
+ .atomic_duplicate_state = drm_gem_duplicate_shadow_plane_state, \
+ .atomic_destroy_state = drm_gem_destroy_shadow_plane_state
+
+int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state);
+void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state);
+
+/**
+ * DRM_GEM_SHADOW_PLANE_HELPER_FUNCS -
+ * Initializes struct drm_plane_helper_funcs for shadow-buffered planes
+ *
+ * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This
+ * macro initializes struct drm_plane_helper_funcs to use the respective helper
+ * functions.
+ */
+#define DRM_GEM_SHADOW_PLANE_HELPER_FUNCS \
+ .prepare_fb = drm_gem_prepare_shadow_fb, \
+ .cleanup_fb = drm_gem_cleanup_shadow_fb
+
+int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe);
+struct drm_plane_state *
+drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe);
+void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
+/**
+ * DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS -
+ * Initializes struct drm_simple_display_pipe_funcs for shadow-buffered planes
+ *
+ * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This
+ * macro initializes struct drm_simple_display_pipe_funcs to use the respective helper
+ * functions.
+ */
+#define DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS \
+ .prepare_fb = drm_gem_simple_kms_prepare_shadow_fb, \
+ .cleanup_fb = drm_gem_simple_kms_cleanup_shadow_fb, \
+ .reset_plane = drm_gem_simple_kms_reset_shadow_plane, \
+ .duplicate_plane_state = drm_gem_simple_kms_duplicate_shadow_plane_state, \
+ .destroy_plane_state = drm_gem_simple_kms_destroy_shadow_plane_state
+
+#endif /* __DRM_GEM_ATOMIC_HELPER_H__ */
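
A short, hedged example of how a driver might consume the new header (foo_* names are placeholders, not taken from any in-tree driver): planes whose buffers are always pinned point .prepare_fb at drm_gem_plane_helper_prepare_fb(), while shadow-buffered planes pull in the state and fb machinery through the two macros and can read the vmapped buffer via to_drm_shadow_plane_state(plane->state)->map[0] in their update hook.

static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_plane_cleanup,
	DRM_GEM_SHADOW_PLANE_FUNCS,
};

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	.atomic_check	= foo_plane_atomic_check,
	.atomic_update	= foo_plane_atomic_update,
	DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,	/* prepare_fb + cleanup_fb */
};
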
diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h
index 6b013154911d..6bdffc7aa124 100644
--- a/include/drm/drm_gem_framebuffer_helper.h
+++ b/include/drm/drm_gem_framebuffer_helper.h
@@ -9,9 +9,6 @@ struct drm_framebuffer;
struct drm_framebuffer_funcs;
struct drm_gem_object;
struct drm_mode_fb_cmd2;
-struct drm_plane;
-struct drm_plane_state;
-struct drm_simple_display_pipe;
#define AFBC_VENDOR_AND_TYPE_MASK GENMASK_ULL(63, 52)
@@ -44,8 +41,4 @@ int drm_gem_fb_afbc_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb);
-int drm_gem_fb_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *state);
-int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
#endif
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index a4bac02249c2..288055d397d9 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -172,19 +172,19 @@ struct drm_vram_mm {
uint64_t vram_base;
size_t vram_size;
- struct ttm_bo_device bdev;
+ struct ttm_device bdev;
};
/**
* drm_vram_mm_of_bdev() - \
- Returns the container of type &struct ttm_bo_device for field bdev.
+ Returns the container of type &struct ttm_device for field bdev.
* @bdev: the TTM BO device
*
* Returns:
* The containing instance of &struct drm_vram_mm
*/
static inline struct drm_vram_mm *drm_vram_mm_of_bdev(
- struct ttm_bo_device *bdev)
+ struct ttm_device *bdev)
{
return container_of(bdev, struct drm_vram_mm, bdev);
}
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index eb706342861d..f3a4b47b3986 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1179,7 +1179,7 @@ struct drm_plane_helper_funcs {
* members in the plane structure.
*
* Drivers which always have their buffers pinned should use
- * drm_gem_fb_prepare_fb() for this hook.
+ * drm_gem_plane_helper_prepare_fb() for this hook.
*
* The helpers will call @cleanup_fb with matching arguments for every
* successful call to this hook.
@@ -1233,9 +1233,8 @@ struct drm_plane_helper_funcs {
* NOTE:
*
* This function is called in the check phase of an atomic update. The
- * driver is not allowed to change anything outside of the free-standing
- * state objects passed-in or assembled in the overall &drm_atomic_state
- * update tracking structure.
+ * driver is not allowed to change anything outside of the
+ * &drm_atomic_state update tracking structure.
*
* RETURNS:
*
@@ -1245,7 +1244,7 @@ struct drm_plane_helper_funcs {
* deadlock.
*/
int (*atomic_check)(struct drm_plane *plane,
- struct drm_plane_state *state);
+ struct drm_atomic_state *state);
/**
* @atomic_update:
@@ -1263,7 +1262,7 @@ struct drm_plane_helper_funcs {
* transitional plane helpers, but it is optional.
*/
void (*atomic_update)(struct drm_plane *plane,
- struct drm_plane_state *old_state);
+ struct drm_atomic_state *state);
/**
* @atomic_disable:
*
@@ -1287,14 +1286,14 @@ struct drm_plane_helper_funcs {
* transitional plane helpers, but it is optional.
*/
void (*atomic_disable)(struct drm_plane *plane,
- struct drm_plane_state *old_state);
+ struct drm_atomic_state *state);
/**
* @atomic_async_check:
*
- * Drivers should set this function pointer to check if the plane state
- * can be updated in a async fashion. Here async means "not vblank
- * synchronized".
+ * Drivers should set this function pointer to check if the plane's
+	 * atomic state can be updated in an async fashion. Here async means
+ * "not vblank synchronized".
*
* This hook is called by drm_atomic_async_check() to establish if a
* given update can be committed asynchronously, that is, if it can
@@ -1306,7 +1305,7 @@ struct drm_plane_helper_funcs {
	 * can not be applied in an asynchronous manner.
*/
int (*atomic_async_check)(struct drm_plane *plane,
- struct drm_plane_state *state);
+ struct drm_atomic_state *state);
/**
* @atomic_async_update:
@@ -1322,11 +1321,9 @@ struct drm_plane_helper_funcs {
* update won't happen if there is an outstanding commit modifying
* the same plane.
*
- * Note that unlike &drm_plane_helper_funcs.atomic_update this hook
- * takes the new &drm_plane_state as parameter. When doing async_update
- * drivers shouldn't replace the &drm_plane_state but update the
- * current one with the new plane configurations in the new
- * plane_state.
+ * When doing async_update drivers shouldn't replace the
+ * &drm_plane_state but update the current one with the new plane
+ * configurations in the new plane_state.
*
* Drivers should also swap the framebuffers between current plane
* state (&drm_plane.state) and new_state.
@@ -1345,7 +1342,7 @@ struct drm_plane_helper_funcs {
* for deferring if needed, until a common solution is created.
*/
void (*atomic_async_update)(struct drm_plane *plane,
- struct drm_plane_state *new_state);
+ struct drm_atomic_state *state);
};
/**
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 8ef06ee1c8eb..1294610e84f4 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -79,8 +79,8 @@ struct drm_plane_state {
* preserved.
*
* Drivers should store any implicit fence in this from their
- * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb()
- * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers.
+ * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_plane_helper_prepare_fb()
+ * and drm_gem_simple_display_pipe_prepare_fb() for suitable helpers.
*/
struct dma_fence *fence;
@@ -538,10 +538,14 @@ struct drm_plane_funcs {
*
* For compatibility with legacy userspace, only overlay planes are made
* available to userspace by default. Userspace clients may set the
- * DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they
+ * &DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they
* wish to receive a universal plane list containing all plane types. See also
* drm_for_each_legacy_plane().
*
+ * In addition to setting each plane's type, drivers need to set up the
+ * &drm_crtc.primary and optionally &drm_crtc.cursor pointers for legacy
+ * IOCTLs. See drm_crtc_init_with_planes().
+ *
* WARNING: The values of this enum is UABI since they're exposed in the "type"
* property.
*/
@@ -557,19 +561,20 @@ enum drm_plane_type {
/**
* @DRM_PLANE_TYPE_PRIMARY:
*
- * Primary planes represent a "main" plane for a CRTC. Primary planes
- * are the planes operated upon by CRTC modesetting and flipping
- * operations described in the &drm_crtc_funcs.page_flip and
- * &drm_crtc_funcs.set_config hooks.
+ * A primary plane attached to a CRTC is the most likely to be able to
+ * light up the CRTC when no scaling/cropping is used and the plane
+ * covers the whole CRTC.
*/
DRM_PLANE_TYPE_PRIMARY,
/**
* @DRM_PLANE_TYPE_CURSOR:
*
- * Cursor planes represent a "cursor" plane for a CRTC. Cursor planes
- * are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and
- * DRM_IOCTL_MODE_CURSOR2 IOCTLs.
+ * A cursor plane attached to a CRTC is more likely to be able to be
+ * enabled when no scaling/cropping is used and the framebuffer has the
+ * size indicated by &drm_mode_config.cursor_width and
+ * &drm_mode_config.cursor_height. Additionally, if the driver doesn't
+ * support modifiers, the framebuffer should have a linear layout.
*/
DRM_PLANE_TYPE_CURSOR,
};
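
As the updated comment notes, the legacy plane pointers are wired up when the CRTC is created; a minimal sketch, assuming the driver's foo_* planes and funcs already exist:

	ret = drm_crtc_init_with_planes(dev, &foo->crtc,
					&foo->primary_plane,
					&foo->cursor_plane,	/* may be NULL */
					&foo_crtc_funcs, NULL);
	if (ret)
		return ret;
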
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index e6dbf3161c2f..ef9944e9c5fc 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -117,7 +117,7 @@ struct drm_simple_display_pipe_funcs {
* more details.
*
* Drivers which always have their buffers pinned should use
- * drm_gem_fb_simple_display_pipe_prepare_fb() for this hook.
+ * drm_gem_simple_display_pipe_prepare_fb() for this hook.
*/
int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
@@ -149,6 +149,33 @@ struct drm_simple_display_pipe_funcs {
* more details.
*/
void (*disable_vblank)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @reset_plane:
+ *
+ * Optional, called by &drm_plane_funcs.reset. Please read the
+ * documentation for the &drm_plane_funcs.reset hook for more details.
+ */
+ void (*reset_plane)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @duplicate_plane_state:
+ *
+ * Optional, called by &drm_plane_funcs.atomic_duplicate_state. Please
+ * read the documentation for the &drm_plane_funcs.atomic_duplicate_state
+ * hook for more details.
+ */
+ struct drm_plane_state * (*duplicate_plane_state)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @destroy_plane_state:
+ *
+ * Optional, called by &drm_plane_funcs.atomic_destroy_state. Please
+ * read the documentation for the &drm_plane_funcs.atomic_destroy_state
+ * hook for more details.
+ */
+ void (*destroy_plane_state)(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
};
/**
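
With the three optional hooks added above, a simple-pipe driver that renders through a shadow buffer can wire everything up with the convenience macro from drm_gem_atomic_helper.h; a hedged sketch (foo_* names are placeholders):

static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
	.enable	 = foo_pipe_enable,
	.disable = foo_pipe_disable,
	.update	 = foo_pipe_update,
	DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};

Inside foo_pipe_update() the vmapped shadow buffer is then reachable as to_drm_shadow_plane_state(pipe->plane.state)->map[0].
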
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index dd125f8c766c..733a3e2d1d10 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -247,7 +247,6 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
void drm_crtc_vblank_on(struct drm_crtc *crtc);
u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
-void drm_vblank_restore(struct drm_device *dev, unsigned int pipe);
void drm_crtc_vblank_restore(struct drm_crtc *crtc);
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 975e8a67947f..1c815e0a14ed 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -206,6 +206,12 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
return s_job && atomic_inc_return(&s_job->karma) > threshold;
}
+enum drm_gpu_sched_stat {
+ DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
+ DRM_GPU_SCHED_STAT_NOMINAL,
+ DRM_GPU_SCHED_STAT_ENODEV,
+};
+
/**
* struct drm_sched_backend_ops
*
@@ -230,10 +236,16 @@ struct drm_sched_backend_ops {
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
/**
- * @timedout_job: Called when a job has taken too long to execute,
- * to trigger GPU recovery.
+ * @timedout_job: Called when a job has taken too long to execute,
+ * to trigger GPU recovery.
+ *
+ * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
+ * and the underlying driver has started or completed recovery.
+ *
+ * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
+ * available, i.e. has been unplugged.
*/
- void (*timedout_job)(struct drm_sched_job *sched_job);
+ enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
/**
* @free_job: Called once the job's finished fence has been signaled
@@ -285,7 +297,8 @@ struct drm_gpu_scheduler {
struct list_head pending_list;
spinlock_t job_list_lock;
int hang_limit;
- atomic_t score;
+ atomic_t *score;
+ atomic_t _score;
bool ready;
bool free_guilty;
};
@@ -293,7 +306,7 @@ struct drm_gpu_scheduler {
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
uint32_t hw_submission, unsigned hang_limit, long timeout,
- const char *name);
+ atomic_t *score, const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
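
Under the new contract, timedout_job reports how recovery went instead of returning void, and drm_sched_init() optionally takes an external score counter (passing NULL keeps the scheduler's internal one). A hedged sketch for a hypothetical driver, with foo_* names as placeholders:

static enum drm_gpu_sched_stat foo_timedout_job(struct drm_sched_job *sched_job)
{
	if (foo_device_unplugged(sched_job))	/* placeholder helper */
		return DRM_GPU_SCHED_STAT_ENODEV;

	foo_reset_gpu(sched_job);		/* placeholder helper */
	return DRM_GPU_SCHED_STAT_NOMINAL;
}

static const struct drm_sched_backend_ops foo_sched_ops = {
	.run_job	= foo_run_job,
	.timedout_job	= foo_timedout_job,
	.free_job	= foo_free_job,
};

and at init time:

	/* NULL score: use the scheduler's internal counter. */
	ret = drm_sched_init(&foo->sched, &foo_sched_ops, 64, 10,
			     msecs_to_jiffies(500), NULL, "foo");
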
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index e17be324d95f..4fb523dfab32 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -44,9 +44,9 @@
#include "ttm_resource.h"
-struct ttm_bo_global;
+struct ttm_global;
-struct ttm_bo_device;
+struct ttm_device;
struct dma_buf_map;
@@ -88,7 +88,6 @@ struct ttm_tt;
* @type: The bo type.
* @destroy: Destruction function. If NULL, kfree is used.
* @num_pages: Actual number of pages.
- * @acc_size: Accounted size for this object.
* @kref: Reference count of this buffer object. When this refcount reaches
* zero, the object is destroyed or put on the delayed delete list.
* @mem: structure describing current placement.
@@ -122,10 +121,9 @@ struct ttm_buffer_object {
* Members constant at init.
*/
- struct ttm_bo_device *bdev;
+ struct ttm_device *bdev;
enum ttm_bo_type type;
void (*destroy) (struct ttm_buffer_object *);
- size_t acc_size;
/**
* Members not needing protection.
@@ -313,7 +311,7 @@ void ttm_bo_put(struct ttm_buffer_object *bo);
* @bulk: optional bulk move structure to remember BO positions
*
* Move this BO to the tail of all lru lists used to lookup and reserve an
- * object. This function must be called with struct ttm_bo_global::lru_lock
+ * object. This function must be called with struct ttm_global::lru_lock
* held, and is used to make a BO less likely to be considered for eviction.
*/
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
@@ -326,7 +324,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
* @bulk: bulk move structure
*
* Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
- * BO order never changes. Should be called with ttm_bo_global::lru_lock held.
+ * BO order never changes. Should be called with ttm_global::lru_lock held.
*/
void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
@@ -337,14 +335,14 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
* Returns
* True if the workqueue was queued at the time
*/
-int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
+int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
/**
* ttm_bo_unlock_delayed_workqueue
*
* Allows the delayed workqueue to run.
*/
-void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
+void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
/**
* ttm_bo_eviction_valuable
@@ -357,21 +355,16 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
-size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
- unsigned long bo_size,
- unsigned struct_size);
-
/**
* ttm_bo_init_reserved
*
- * @bdev: Pointer to a ttm_bo_device struct.
+ * @bdev: Pointer to a ttm_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
* @flags: Initial placement flags.
* @page_alignment: Data alignment in pages.
* @ctx: TTM operation context for memory allocation.
- * @acc_size: Accounted size for this object.
* @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
@@ -396,20 +389,19 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/
-int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
+int ttm_bo_init_reserved(struct ttm_device *bdev,
struct ttm_buffer_object *bo,
size_t size, enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
struct ttm_operation_ctx *ctx,
- size_t acc_size, struct sg_table *sg,
- struct dma_resv *resv,
+ struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *));
/**
* ttm_bo_init
*
- * @bdev: Pointer to a ttm_bo_device struct.
+ * @bdev: Pointer to a ttm_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
@@ -421,7 +413,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* holds a pointer to a persistent shmem object. Typically, this would
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface.
- * @acc_size: Accounted size for this object.
* @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
@@ -443,10 +434,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* -EINVAL: Invalid placement flags.
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/
-int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
+int ttm_bo_init(struct ttm_device *bdev, struct ttm_buffer_object *bo,
size_t size, enum ttm_bo_type type,
struct ttm_placement *placement,
- uint32_t page_alignment, bool interrubtible, size_t acc_size,
+ uint32_t page_alignment, bool interrubtible,
struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *));
@@ -537,18 +528,18 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
*
* @filp: filp as input from the mmap method.
* @vma: vma as input from the mmap method.
- * @bdev: Pointer to the ttm_bo_device with the address space manager.
+ * @bdev: Pointer to the ttm_device with the address space manager.
*
* This function is intended to be called by the device mmap method.
* if the device address space is to be backed by the bo manager.
*/
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
- struct ttm_bo_device *bdev);
+ struct ttm_device *bdev);
/**
* ttm_bo_io
*
- * @bdev: Pointer to the struct ttm_bo_device.
+ * @bdev: Pointer to the struct ttm_device.
* @filp: Pointer to the struct file attempting to read / write.
* @wbuf: User-space pointer to address of buffer to write. NULL on read.
* @rbuf: User-space pointer to address of buffer to read into.
@@ -565,11 +556,11 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
* the function may return -ERESTARTSYS if
* interrupted by a signal.
*/
-ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp,
const char __user *wbuf, char __user *rbuf,
size_t count, loff_t *f_pos, bool write);
-int ttm_bo_swapout(struct ttm_operation_ctx *ctx);
+int ttm_bo_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
/**
* ttm_bo_uses_embedded_gem_object - check if the given bo uses the
@@ -617,7 +608,7 @@ static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
--bo->pin_count;
}
-int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+int ttm_mem_evict_first(struct ttm_device *bdev,
struct ttm_resource_manager *man,
const struct ttm_place *place,
struct ttm_operation_ctx *ctx,
@@ -642,5 +633,6 @@ void ttm_bo_vm_close(struct vm_area_struct *vma);
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
+bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all);
#endif
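
With per-BO accounting removed, acc_size drops out of the init paths and every bdev parameter becomes a struct ttm_device. A hedged call-site sketch (foo_*, bo, size, page_align, the placement and the destroy callback are assumed to exist in the driver):

	ret = ttm_bo_init(&foo->ttm_dev, bo, size, ttm_bo_type_device,
			  &foo_placement, page_align,
			  true /* interruptible */,
			  NULL /* sg */, NULL /* resv */,
			  foo_bo_destroy);
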
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 423348414c59..8959c0075cfd 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -37,302 +37,14 @@
#include <linux/spinlock.h>
#include <linux/dma-resv.h>
+#include <drm/ttm/ttm_device.h>
+
#include "ttm_bo_api.h"
-#include "ttm_memory.h"
#include "ttm_placement.h"
#include "ttm_tt.h"
#include "ttm_pool.h"
/**
- * struct ttm_bo_driver
- *
- * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
- * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
- * @move: Callback for a driver to hook in accelerated functions to
- * move a buffer.
- * If set to NULL, a potentially slow memcpy() move is used.
- */
-
-struct ttm_bo_driver {
- /**
- * ttm_tt_create
- *
- * @bo: The buffer object to create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- *
- * Create a struct ttm_tt to back data with system memory pages.
- * No pages are actually allocated.
- * Returns:
- * NULL: Out of memory.
- */
- struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
- uint32_t page_flags);
-
- /**
- * ttm_tt_populate
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Allocate all backing pages
- * Returns:
- * -ENOMEM: Out of memory.
- */
- int (*ttm_tt_populate)(struct ttm_bo_device *bdev,
- struct ttm_tt *ttm,
- struct ttm_operation_ctx *ctx);
-
- /**
- * ttm_tt_unpopulate
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Free all backing page
- */
- void (*ttm_tt_unpopulate)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
-
- /**
- * ttm_tt_destroy
- *
- * @bdev: Pointer to a ttm device
- * @ttm: Pointer to a struct ttm_tt.
- *
- * Destroy the backend. This will be call back from ttm_tt_destroy so
- * don't call ttm_tt_destroy from the callback or infinite loop.
- */
- void (*ttm_tt_destroy)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
-
- /**
- * struct ttm_bo_driver member eviction_valuable
- *
- * @bo: the buffer object to be evicted
- * @place: placement we need room for
- *
- * Check with the driver if it is valuable to evict a BO to make room
- * for a certain placement.
- */
- bool (*eviction_valuable)(struct ttm_buffer_object *bo,
- const struct ttm_place *place);
- /**
- * struct ttm_bo_driver member evict_flags:
- *
- * @bo: the buffer object to be evicted
- *
- * Return the bo flags for a buffer which is not mapped to the hardware.
- * These will be placed in proposed_flags so that when the move is
- * finished, they'll end up in bo->mem.flags
- * This should not cause multihop evictions, and the core will warn
- * if one is proposed.
- */
-
- void (*evict_flags)(struct ttm_buffer_object *bo,
- struct ttm_placement *placement);
-
- /**
- * struct ttm_bo_driver member move:
- *
- * @bo: the buffer to move
- * @evict: whether this motion is evicting the buffer from
- * the graphics address space
- * @ctx: context for this move with parameters
- * @new_mem: the new memory region receiving the buffer
- @ @hop: placement for driver directed intermediate hop
- *
- * Move a buffer between two memory regions.
- * Returns errno -EMULTIHOP if driver requests a hop
- */
- int (*move)(struct ttm_buffer_object *bo, bool evict,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem,
- struct ttm_place *hop);
-
- /**
- * struct ttm_bo_driver_member verify_access
- *
- * @bo: Pointer to a buffer object.
- * @filp: Pointer to a struct file trying to access the object.
- *
- * Called from the map / write / read methods to verify that the
- * caller is permitted to access the buffer object.
- * This member may be set to NULL, which will refuse this kind of
- * access for all buffer objects.
- * This function should return 0 if access is granted, -EPERM otherwise.
- */
- int (*verify_access)(struct ttm_buffer_object *bo,
- struct file *filp);
-
- /**
- * Hook to notify driver about a resource delete.
- */
- void (*delete_mem_notify)(struct ttm_buffer_object *bo);
-
- /**
- * notify the driver that we're about to swap out this bo
- */
- void (*swap_notify)(struct ttm_buffer_object *bo);
-
- /**
- * Driver callback on when mapping io memory (for bo_move_memcpy
- * for instance). TTM will take care to call io_mem_free whenever
- * the mapping is not use anymore. io_mem_reserve & io_mem_free
- * are balanced.
- */
- int (*io_mem_reserve)(struct ttm_bo_device *bdev,
- struct ttm_resource *mem);
- void (*io_mem_free)(struct ttm_bo_device *bdev,
- struct ttm_resource *mem);
-
- /**
- * Return the pfn for a given page_offset inside the BO.
- *
- * @bo: the BO to look up the pfn for
- * @page_offset: the offset to look up
- */
- unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
- unsigned long page_offset);
-
- /**
- * Read/write memory buffers for ptrace access
- *
- * @bo: the BO to access
- * @offset: the offset from the start of the BO
- * @buf: pointer to source/destination buffer
- * @len: number of bytes to copy
- * @write: whether to read (0) from or write (non-0) to BO
- *
- * If successful, this function should return the number of
- * bytes copied, -EIO otherwise. If the number of bytes
- * returned is < len, the function may be called again with
- * the remainder of the buffer to copy.
- */
- int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
- void *buf, int len, int write);
-
- /**
- * struct ttm_bo_driver member del_from_lru_notify
- *
- * @bo: the buffer object deleted from lru
- *
- * notify driver that a BO was deleted from LRU.
- */
- void (*del_from_lru_notify)(struct ttm_buffer_object *bo);
-
- /**
- * Notify the driver that we're about to release a BO
- *
- * @bo: BO that is about to be released
- *
- * Gives the driver a chance to do any cleanup, including
- * adding fences that may force a delayed delete
- */
- void (*release_notify)(struct ttm_buffer_object *bo);
-};
-
-/**
- * struct ttm_bo_global - Buffer object driver global data.
- *
- * @dummy_read_page: Pointer to a dummy page used for mapping requests
- * of unpopulated pages.
- * @shrink: A shrink callback object used for buffer object swap.
- * @device_list_mutex: Mutex protecting the device list.
- * This mutex is held while traversing the device list for pm options.
- * @lru_lock: Spinlock protecting the bo subsystem lru lists.
- * @device_list: List of buffer object devices.
- * @swap_lru: Lru list of buffer objects used for swapping.
- */
-
-extern struct ttm_bo_global {
-
- /**
- * Constant after init.
- */
-
- struct kobject kobj;
- struct page *dummy_read_page;
- spinlock_t lru_lock;
-
- /**
- * Protected by ttm_global_mutex.
- */
- struct list_head device_list;
-
- /**
- * Protected by the lru_lock.
- */
- struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
-
- /**
- * Internal protection.
- */
- atomic_t bo_count;
-} ttm_bo_glob;
-
-
-#define TTM_NUM_MEM_TYPES 8
-
-/**
- * struct ttm_bo_device - Buffer object driver device-specific data.
- *
- * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
- * @man: An array of resource_managers.
- * @vma_manager: Address space manager (pointer)
- * lru_lock: Spinlock that protects the buffer+device lru lists and
- * ddestroy lists.
- * @dev_mapping: A pointer to the struct address_space representing the
- * device address space.
- * @wq: Work queue structure for the delayed delete workqueue.
- *
- */
-
-struct ttm_bo_device {
-
- /*
- * Constant after bo device init / atomic.
- */
- struct list_head device_list;
- struct ttm_bo_driver *driver;
- /*
- * access via ttm_manager_type.
- */
- struct ttm_resource_manager sysman;
- struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
- /*
- * Protected by internal locks.
- */
- struct drm_vma_offset_manager *vma_manager;
- struct ttm_pool pool;
-
- /*
- * Protected by the global:lru lock.
- */
- struct list_head ddestroy;
-
- /*
- * Protected by load / firstopen / lastclose /unload sync.
- */
-
- struct address_space *dev_mapping;
-
- /*
- * Internal protection.
- */
-
- struct delayed_work wq;
-};
-
-static inline struct ttm_resource_manager *ttm_manager_type(struct ttm_bo_device *bdev,
- int mem_type)
-{
- return bdev->man_drv[mem_type];
-}
-
-static inline void ttm_set_driver_manager(struct ttm_bo_device *bdev,
- int type,
- struct ttm_resource_manager *manager)
-{
- bdev->man_drv[type] = manager;
-}
-
-/**
* struct ttm_lru_bulk_move_pos
*
* @first: first BO in the bulk move range
@@ -388,31 +100,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_resource *mem,
struct ttm_operation_ctx *ctx);
-int ttm_bo_device_release(struct ttm_bo_device *bdev);
-
-/**
- * ttm_bo_device_init
- *
- * @bdev: A pointer to a struct ttm_bo_device to initialize.
- * @glob: A pointer to an initialized struct ttm_bo_global.
- * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
- * @dev: The core kernel device pointer for DMA mappings and allocations.
- * @mapping: The address space to use for this bo.
- * @vma_manager: A pointer to a vma manager.
- * @use_dma_alloc: If coherent DMA allocation API should be used.
- * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
- *
- * Initializes a struct ttm_bo_device:
- * Returns:
- * !0: Failure.
- */
-int ttm_bo_device_init(struct ttm_bo_device *bdev,
- struct ttm_bo_driver *driver,
- struct device *dev,
- struct address_space *mapping,
- struct drm_vma_offset_manager *vma_manager,
- bool use_dma_alloc, bool use_dma32);
-
/**
* ttm_bo_unmap_virtual
*
@@ -494,9 +181,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&ttm_glob.lru_lock);
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&ttm_glob.lru_lock);
}
static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
@@ -538,9 +225,9 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
/*
* ttm_bo_util.c
*/
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+int ttm_mem_io_reserve(struct ttm_device *bdev,
struct ttm_resource *mem);
-void ttm_mem_io_free(struct ttm_bo_device *bdev,
+void ttm_mem_io_free(struct ttm_device *bdev,
struct ttm_resource *mem);
/**
@@ -631,7 +318,7 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
* Initialise a generic range manager for the selected memory type.
* The range manager is installed for this device in the type slot.
*/
-int ttm_range_man_init(struct ttm_bo_device *bdev,
+int ttm_range_man_init(struct ttm_device *bdev,
unsigned type, bool use_tt,
unsigned long p_size);
@@ -643,7 +330,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev,
*
* Remove the generic range manager from a slot and tear it down.
*/
-int ttm_range_man_fini(struct ttm_bo_device *bdev,
+int ttm_range_man_fini(struct ttm_device *bdev,
unsigned type);
#endif
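
For reference, a minimal sketch of how a driver might use the renamed range-manager entry points above. Everything prefixed mydrv_ is hypothetical, and the include path assumes these prototypes stay in <drm/ttm/ttm_bo_driver.h> in this series; only ttm_range_man_init()/ttm_range_man_fini() and TTM_PL_VRAM are taken from the kernel headers.

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/* Install/remove a range-based manager in the VRAM slot of a ttm_device. */
static int mydrv_vram_mgr_init(struct ttm_device *bdev, unsigned long vram_pages)
{
	/* No TT backing for VRAM; size is given in pages. */
	return ttm_range_man_init(bdev, TTM_PL_VRAM, false, vram_pages);
}

static void mydrv_vram_mgr_fini(struct ttm_device *bdev)
{
	ttm_range_man_fini(bdev, TTM_PL_VRAM);
}
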
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
new file mode 100644
index 000000000000..035bbc044a3b
--- /dev/null
+++ b/include/drm/ttm/ttm_device.h
@@ -0,0 +1,318 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_DEVICE_H_
+#define _TTM_DEVICE_H_
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_pool.h>
+
+#define TTM_NUM_MEM_TYPES 8
+
+struct ttm_device;
+struct ttm_placement;
+struct ttm_buffer_object;
+struct ttm_operation_ctx;
+
+/**
+ * struct ttm_global - Buffer object driver global data.
+ *
+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
+ * of unpopulated pages.
+ * @shrink: A shrink callback object used for buffer object swap.
+ * @device_list_mutex: Mutex protecting the device list.
+ * This mutex is held while traversing the device list for pm options.
+ * @lru_lock: Spinlock protecting the bo subsystem lru lists.
+ * @device_list: List of buffer object devices.
+ * @swap_lru: Lru list of buffer objects used for swapping.
+ */
+extern struct ttm_global {
+
+ /**
+ * Constant after init.
+ */
+
+ struct page *dummy_read_page;
+ spinlock_t lru_lock;
+
+ /**
+ * Protected by ttm_global_mutex.
+ */
+ struct list_head device_list;
+
+ /**
+ * Protected by the lru_lock.
+ */
+ struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
+
+ /**
+ * Internal protection.
+ */
+ atomic_t bo_count;
+} ttm_glob;
+
+struct ttm_device_funcs {
+ /**
+ * ttm_tt_create
+ *
+ * @bo: The buffer object to create the ttm for.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * NULL: Out of memory.
+ */
+ struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+
+ /**
+ * ttm_tt_populate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Allocate all backing pages
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+ int (*ttm_tt_populate)(struct ttm_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx);
+
+ /**
+ * ttm_tt_unpopulate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Free all backing pages
+ */
+ void (*ttm_tt_unpopulate)(struct ttm_device *bdev,
+ struct ttm_tt *ttm);
+
+ /**
+ * ttm_tt_destroy
+ *
+ * @bdev: Pointer to a ttm device
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+ * Destroy the backend. This is called back from ttm_tt_destroy, so
+ * don't call ttm_tt_destroy from the callback or it will loop forever.
+ */
+ void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm);
+
+ /**
+ * struct ttm_device_funcs member eviction_valuable
+ *
+ * @bo: the buffer object to be evicted
+ * @place: placement we need room for
+ *
+ * Check with the driver if it is valuable to evict a BO to make room
+ * for a certain placement.
+ */
+ bool (*eviction_valuable)(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
+ /**
+ * struct ttm_device_funcs member evict_flags:
+ *
+ * @bo: the buffer object to be evicted
+ *
+ * Return the bo flags for a buffer which is not mapped to the hardware.
+ * These will be placed in proposed_flags so that when the move is
+ * finished, they'll end up in bo->mem.flags
+ * This should not cause multihop evictions, and the core will warn
+ * if one is proposed.
+ */
+
+ void (*evict_flags)(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+
+ /**
+ * struct ttm_device_funcs member move:
+ *
+ * @bo: the buffer to move
+ * @evict: whether this motion is evicting the buffer from
+ * the graphics address space
+ * @ctx: context for this move with parameters
+ * @new_mem: the new memory region receiving the buffer
+ * @hop: placement for driver directed intermediate hop
+ *
+ * Move a buffer between two memory regions.
+ * Returns errno -EMULTIHOP if driver requests a hop
+ */
+ int (*move)(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem,
+ struct ttm_place *hop);
+
+ /**
+ * struct ttm_device_funcs member verify_access
+ *
+ * @bo: Pointer to a buffer object.
+ * @filp: Pointer to a struct file trying to access the object.
+ *
+ * Called from the map / write / read methods to verify that the
+ * caller is permitted to access the buffer object.
+ * This member may be set to NULL, which will refuse this kind of
+ * access for all buffer objects.
+ * This function should return 0 if access is granted, -EPERM otherwise.
+ */
+ int (*verify_access)(struct ttm_buffer_object *bo,
+ struct file *filp);
+
+ /**
+ * Hook to notify driver about a resource delete.
+ */
+ void (*delete_mem_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * notify the driver that we're about to swap out this bo
+ */
+ void (*swap_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * Driver callback invoked when mapping io memory (for bo_move_memcpy,
+ * for instance). TTM will take care to call io_mem_free whenever
+ * the mapping is not used anymore. io_mem_reserve & io_mem_free
+ * are balanced.
+ */
+ int (*io_mem_reserve)(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+ void (*io_mem_free)(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+
+ /**
+ * Return the pfn for a given page_offset inside the BO.
+ *
+ * @bo: the BO to look up the pfn for
+ * @page_offset: the offset to look up
+ */
+ unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
+ unsigned long page_offset);
+
+ /**
+ * Read/write memory buffers for ptrace access
+ *
+ * @bo: the BO to access
+ * @offset: the offset from the start of the BO
+ * @buf: pointer to source/destination buffer
+ * @len: number of bytes to copy
+ * @write: whether to read (0) from or write (non-0) to BO
+ *
+ * If successful, this function should return the number of
+ * bytes copied, -EIO otherwise. If the number of bytes
+ * returned is < len, the function may be called again with
+ * the remainder of the buffer to copy.
+ */
+ int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
+ void *buf, int len, int write);
+
+ /**
+ * struct ttm_device_funcs member del_from_lru_notify
+ *
+ * @bo: the buffer object deleted from lru
+ *
+ * notify driver that a BO was deleted from LRU.
+ */
+ void (*del_from_lru_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * Notify the driver that we're about to release a BO
+ *
+ * @bo: BO that is about to be released
+ *
+ * Gives the driver a chance to do any cleanup, including
+ * adding fences that may force a delayed delete
+ */
+ void (*release_notify)(struct ttm_buffer_object *bo);
+};
+
+/**
+ * struct ttm_device - Buffer object driver device-specific data.
+ *
+ * @device_list: Our entry in the global device list.
+ * @funcs: Function table for the device.
+ * @sysman: Resource manager for the system domain.
+ * @man_drv: An array of resource_managers.
+ * @vma_manager: Address space manager.
+ * @pool: page pool for the device.
+ * @dev_mapping: A pointer to the struct address_space representing the
+ * device address space.
+ * @wq: Work queue structure for the delayed delete workqueue.
+ */
+struct ttm_device {
+ /*
+ * Constant after bo device init
+ */
+ struct list_head device_list;
+ struct ttm_device_funcs *funcs;
+
+ /*
+ * Access via ttm_manager_type.
+ */
+ struct ttm_resource_manager sysman;
+ struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
+
+ /*
+ * Protected by internal locks.
+ */
+ struct drm_vma_offset_manager *vma_manager;
+ struct ttm_pool pool;
+
+ /*
+ * Protected by the global:lru lock.
+ */
+ struct list_head ddestroy;
+
+ /*
+ * Protected by load / firstopen / lastclose /unload sync.
+ */
+ struct address_space *dev_mapping;
+
+ /*
+ * Internal protection.
+ */
+ struct delayed_work wq;
+};
+
+static inline struct ttm_resource_manager *
+ttm_manager_type(struct ttm_device *bdev, int mem_type)
+{
+ return bdev->man_drv[mem_type];
+}
+
+static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
+ struct ttm_resource_manager *manager)
+{
+ bdev->man_drv[type] = manager;
+}
+
+int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
+ struct device *dev, struct address_space *mapping,
+ struct drm_vma_offset_manager *vma_manager,
+ bool use_dma_alloc, bool use_dma32);
+void ttm_device_fini(struct ttm_device *bdev);
+
+#endif
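
Taken together, the new header boils down to: fill in a struct ttm_device_funcs, embed a struct ttm_device in the driver, and bracket its lifetime with ttm_device_init()/ttm_device_fini(). A minimal sketch follows; the mydrv_* names and the drm_device embedding are made up for illustration, while ttm_tt_init(), ttm_cached, TTM_PL_SYSTEM and the ttm_placement types come from the existing TTM headers.

#include <linux/slab.h>
#include <drm/drm_device.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>

struct mydrv_device {			/* hypothetical driver wrapper */
	struct drm_device drm;
	struct ttm_device bdev;
};

static const struct ttm_place mydrv_sys_place = {
	.mem_type = TTM_PL_SYSTEM,
};

static const struct ttm_placement mydrv_sys_placement = {
	.num_placement = 1,
	.placement = &mydrv_sys_place,
	.num_busy_placement = 1,
	.busy_placement = &mydrv_sys_place,
};

static struct ttm_tt *mydrv_ttm_tt_create(struct ttm_buffer_object *bo,
					  uint32_t page_flags)
{
	struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;
	if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
		kfree(tt);
		return NULL;
	}
	return tt;
}

static void mydrv_evict_flags(struct ttm_buffer_object *bo,
			      struct ttm_placement *placement)
{
	/* Evicted buffers go back to plain system memory. */
	*placement = mydrv_sys_placement;
}

static struct ttm_device_funcs mydrv_ttm_funcs = {
	.ttm_tt_create	= mydrv_ttm_tt_create,
	.evict_flags	= mydrv_evict_flags,
};

static int mydrv_ttm_init(struct mydrv_device *mdev)
{
	/* No coherent DMA allocations, allow GFP_DMA32 (illustrative choice). */
	return ttm_device_init(&mdev->bdev, &mydrv_ttm_funcs, mdev->drm.dev,
			       mdev->drm.anon_inode->i_mapping,
			       mdev->drm.vma_offset_manager,
			       false, true);
}

static void mydrv_ttm_fini(struct mydrv_device *mdev)
{
	ttm_device_fini(&mdev->bdev);
}
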
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index da0ed7e8c915..6164ccf4f308 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -33,7 +33,7 @@
#define TTM_MAX_BO_PRIORITY 4U
-struct ttm_bo_device;
+struct ttm_device;
struct ttm_resource_manager;
struct ttm_resource;
struct ttm_place;
@@ -233,7 +233,7 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res);
void ttm_resource_manager_init(struct ttm_resource_manager *man,
unsigned long p_size);
-int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev,
+int ttm_resource_manager_evict_all(struct ttm_device *bdev,
struct ttm_resource_manager *man);
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 6c8eb9a4de81..069f8130241a 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <drm/ttm/ttm_caching.h>
+struct ttm_device;
struct ttm_tt;
struct ttm_resource;
struct ttm_buffer_object;
@@ -118,14 +119,14 @@ void ttm_tt_fini(struct ttm_tt *ttm);
*
* Unbind, unpopulate and destroy common struct ttm_tt.
*/
-void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
* ttm_tt_destroy_common:
*
* Called from driver to destroy common path.
*/
-void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
* ttm_tt_swapin:
@@ -135,7 +136,8 @@ void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
 * Swap in a previously swapped out ttm_tt.
*/
int ttm_tt_swapin(struct ttm_tt *ttm);
-int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
+ gfp_t gfp_flags);
/**
* ttm_tt_populate - allocate pages for a ttm
@@ -144,7 +146,7 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
*
* Calls the driver method to allocate pages for a ttm
*/
-int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
/**
* ttm_tt_unpopulate - free pages from a ttm
@@ -153,7 +155,10 @@ int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_o
*
* Calls the driver method to free all pages from a ttm
*/
-void ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
+
+int ttm_tt_mgr_init(void);
+void ttm_tt_mgr_fini(void);
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 5bc5c946af58..0c05561cad6e 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -51,6 +51,15 @@ struct dma_heap_export_info {
void *dma_heap_get_drvdata(struct dma_heap *heap);
/**
+ * dma_heap_get_name() - get heap name
+ * @heap: DMA-Heap to retrieve private data for
+ *
+ * Returns:
+ * The char* for the heap name.
+ */
+const char *dma_heap_get_name(struct dma_heap *heap);
+
+/**
* dma_heap_add - adds a heap to dmabuf heaps
* @exp_info: information needed to register this heap
*/
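
The new accessor lets heap implementations label the dma-bufs they export with the heap's own name instead of a hard-coded string. A hedged sketch of how an exporter might use it; my_heap_export(), my_heap_buffer and my_heap_dmabuf_ops are hypothetical, and a real exporter must of course fill in the dma_buf_ops.

#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/fcntl.h>

struct my_heap_buffer {			/* hypothetical per-buffer bookkeeping */
	size_t len;
};

/* Left empty here; a real heap must provide map_dma_buf/unmap_dma_buf etc. */
static const struct dma_buf_ops my_heap_dmabuf_ops;

static struct dma_buf *my_heap_export(struct dma_heap *heap,
				      struct my_heap_buffer *buffer)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.exp_name = dma_heap_get_name(heap);	/* new accessor */
	exp_info.ops = &my_heap_dmabuf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	return dma_buf_export(&exp_info);
}
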
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 9850d59d6f1c..c8ec982ff498 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -156,7 +156,7 @@ enum hdmi_content_type {
};
enum hdmi_metadata_type {
- HDMI_STATIC_METADATA_TYPE1 = 1,
+ HDMI_STATIC_METADATA_TYPE1 = 0,
};
enum hdmi_eotf {
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 7b7ebf2e28ec..1c746139d5e3 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -317,6 +317,10 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
} while (0)
+#define lockdep_assert_none_held_once() do { \
+ WARN_ON_ONCE(debug_locks && current->lockdep_depth); \
+ } while (0)
+
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
#define lockdep_pin_lock(l) lock_pin_lock(&(l)->dep_map)
@@ -396,6 +400,7 @@ extern int lockdep_is_held(const void *);
#define lockdep_assert_held_write(l) do { (void)(l); } while (0)
#define lockdep_assert_held_read(l) do { (void)(l); } while (0)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
+#define lockdep_assert_none_held_once() do { } while (0)
#define lockdep_recursing(tsk) (0)
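
lockdep_assert_none_held_once() complements the existing lockdep_assert_held*() family: instead of checking for one specific lock, it fires a one-time warning if the caller holds any lockdep-tracked lock at all, which is useful before long or unbounded waits. A small, hypothetical usage sketch:

#include <linux/completion.h>
#include <linux/lockdep.h>

/* Hypothetical wrapper around a potentially long, killable wait. */
static int my_wait_idle(struct completion *done)
{
	/* Warn (once) if the caller still holds any lockdep-tracked lock. */
	lockdep_assert_none_held_once();

	return wait_for_completion_killable(done);
}
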
diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h
index ca8337695c2a..27ea99af6e1d 100644
--- a/include/linux/platform_data/simplefb.h
+++ b/include/linux/platform_data/simplefb.h
@@ -16,6 +16,7 @@
#define SIMPLEFB_FORMATS \
{ \
{ "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 }, \
+ { "r5g5b5a1", 16, {11, 5}, {6, 5}, {1, 5}, {0, 1}, DRM_FORMAT_RGBA5551 }, \
{ "x1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {0, 0}, DRM_FORMAT_XRGB1555 }, \
{ "a1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {15, 1}, DRM_FORMAT_ARGB1555 }, \
{ "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 }, \
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 1c064627e6c3..d1a93d2a85f9 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -990,7 +990,7 @@ struct drm_format_modifier {
};
/**
- * struct drm_mode_create_blob - Create New block property
+ * struct drm_mode_create_blob - Create New blob property
*
* Create a new 'blob' data property, copying length bytes from data pointer,
* and returning new blob ID.
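
For context, a hypothetical userspace sketch of creating such a blob property through DRM_IOCTL_MODE_CREATEPROPBLOB (using libdrm's drmIoctl); error handling is minimal and the helper name is made up:

#include <stdint.h>
#include <xf86drm.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int create_blob(int fd, const void *data, uint32_t len, uint32_t *blob_id)
{
	struct drm_mode_create_blob blob = {
		.data = (uintptr_t)data,	/* user pointer to the payload */
		.length = len,
	};
	int ret = drmIoctl(fd, DRM_IOCTL_MODE_CREATEPROPBLOB, &blob);

	if (ret)
		return ret;
	*blob_id = blob.blob_id;		/* kernel-assigned blob ID */
	return 0;
}
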
diff --git a/lib/test_printf.c b/lib/test_printf.c
index 95a2f82427c7..a8c0ad4fda2b 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -655,6 +655,23 @@ static void __init fwnode_pointer(void)
software_node_unregister_nodes(softnodes);
}
+static void __init fourcc_pointer(void)
+{
+ struct {
+ u32 code;
+ char *str;
+ } const try[] = {
+ { 0x3231564e, "NV12 little-endian (0x3231564e)", },
+ { 0xb231564e, "NV12 big-endian (0xb231564e)", },
+ { 0x10111213, ".... little-endian (0x10111213)", },
+ { 0x20303159, "Y10 little-endian (0x20303159)", },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(try); i++)
+ test(try[i].str, "%p4cc", &try[i].code);
+}
+
static void __init
errptr(void)
{
@@ -700,6 +717,7 @@ test_pointer(void)
flags();
errptr();
fwnode_pointer();
+ fourcc_pointer();
}
static void __init selftest(void)
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 41ddc353ebb8..35dcc50532d8 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1734,6 +1734,42 @@ char *netdev_bits(char *buf, char *end, const void *addr,
}
static noinline_for_stack
+char *fourcc_string(char *buf, char *end, const u32 *fourcc,
+ struct printf_spec spec, const char *fmt)
+{
+ char output[sizeof("0123 little-endian (0x01234567)")];
+ char *p = output;
+ unsigned int i;
+ u32 val;
+
+ if (fmt[1] != 'c' || fmt[2] != 'c')
+ return error_string(buf, end, "(%p4?)", spec);
+
+ if (check_pointer(&buf, end, fourcc, spec))
+ return buf;
+
+ val = *fourcc & ~BIT(31);
+
+ for (i = 0; i < sizeof(*fourcc); i++) {
+ unsigned char c = val >> (i * 8);
+
+ /* Print non-control ASCII characters as-is, dot otherwise */
+ *p++ = isascii(c) && isprint(c) ? c : '.';
+ }
+
+ strcpy(p, *fourcc & BIT(31) ? " big-endian" : " little-endian");
+ p += strlen(p);
+
+ *p++ = ' ';
+ *p++ = '(';
+ p = special_hex_number(p, output + sizeof(output) - 2, *fourcc, sizeof(u32));
+ *p++ = ')';
+ *p = '\0';
+
+ return string(buf, end, output, spec);
+}
+
+static noinline_for_stack
char *address_val(char *buf, char *end, const void *addr,
struct printf_spec spec, const char *fmt)
{
@@ -2188,6 +2224,7 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
* correctness of the format string and va_list arguments.
* - 'K' For a kernel pointer that should be hidden from unprivileged users
* - 'NF' For a netdev_features_t
+ * - '4cc' V4L2 or DRM FourCC code, with endianness and raw numerical value.
* - 'h[CDN]' For a variable-length buffer, it prints it as a hex string with
* a certain separator (' ' by default):
* C colon
@@ -2285,6 +2322,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return restricted_pointer(buf, end, ptr, spec);
case 'N':
return netdev_bits(buf, end, ptr, spec, fmt);
+ case '4':
+ return fourcc_string(buf, end, ptr, spec, fmt);
case 'a':
return address_val(buf, end, ptr, spec, fmt);
case 'd':
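
With the dispatch hooked up above, any printk()-style call can now format a fourcc code by passing a pointer to the 32-bit value with %p4cc. A minimal usage sketch (the function name is hypothetical; the expected output matches the test cases added to lib/test_printf.c):

#include <linux/printk.h>
#include <linux/types.h>

static void report_format(u32 fourcc)
{
	/*
	 * fourcc == 0x3231564e ('NV12') prints:
	 * "scanout format NV12 little-endian (0x3231564e)"
	 */
	pr_info("scanout format %p4cc\n", &fourcc);
}
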
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index df8b23dc1eb0..73f22fe2d60c 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -6606,9 +6606,11 @@ sub process {
$specifier = $1;
$extension = $2;
$qualifier = $3;
- if ($extension !~ /[SsBKRraEehMmIiUDdgVCbGNOxtf]/ ||
+ if ($extension !~ /[4SsBKRraEehMmIiUDdgVCbGNOxtf]/ ||
($extension eq "f" &&
- defined $qualifier && $qualifier !~ /^w/)) {
+ defined $qualifier && $qualifier !~ /^w/) ||
+ ($extension eq "4" &&
+ defined $qualifier && $qualifier !~ /^cc/)) {
$bad_specifier = $specifier;
last;
}