-rw-r--r--Documentation/devicetree/bindings/display/connector/dvi-connector.txt1
-rw-r--r--Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt24
-rw-r--r--Documentation/devicetree/bindings/display/msm/dsi.txt26
-rw-r--r--Documentation/devicetree/bindings/display/panel/auo,g104sn02.txt12
-rw-r--r--Documentation/devicetree/bindings/display/panel/display-timing.txt5
-rw-r--r--Documentation/devicetree/bindings/display/panel/koe,tx31d200vm0baa.txt25
-rw-r--r--Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt2
-rw-r--r--Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt25
-rw-r--r--Documentation/devicetree/bindings/display/panel/simple-panel.txt4
-rw-r--r--Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt39
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/gpu/drivers.rst21
-rw-r--r--Documentation/gpu/drm-kms.rst3
-rw-r--r--Documentation/gpu/index.rst9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c20
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c51
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h2
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c26
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h15
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c141
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h11
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c151
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.h11
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c38
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c9
-rw-r--r--drivers/gpu/drm/drm_atomic.c42
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_bufs.c16
-rw-r--r--drivers/gpu/drm/drm_edid.c3
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c5
-rw-r--r--drivers/gpu/drm/drm_modes.c34
-rw-r--r--drivers/gpu/drm/drm_plane.c2
-rw-r--r--drivers/gpu/drm/drm_print.c65
-rw-r--r--drivers/gpu/drm/drm_property.c1
-rw-r--r--drivers/gpu/drm/etnaviv/Kconfig1
-rw-r--r--drivers/gpu/drm/etnaviv/Makefile4
-rw-r--r--drivers/gpu/drm/etnaviv/common.xml.h281
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_buffer.c18
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c52
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c21
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c68
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c406
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h54
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_hwdb.c65
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c78
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_mmu.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c170
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.h35
-rw-r--r--drivers/gpu/drm/etnaviv/state.xml.h256
-rw-r--r--drivers/gpu/drm/etnaviv/state_3d.xml.h5
-rw-r--r--drivers/gpu/drm/etnaviv/state_blt.xml.h52
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h150
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c24
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c9
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c22
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c37
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c1
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c23
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c32
-rw-r--r--drivers/gpu/drm/i915/i915_request.c2
-rw-r--r--drivers/gpu/drm/i915/intel_color.c32
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c9
-rw-r--r--drivers/gpu/drm/i915/intel_display.c15
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c37
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c22
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c219
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c347
-rw-r--r--drivers/gpu/drm/meson/meson_venc.h1
-rw-r--r--drivers/gpu/drm/msm/Kconfig20
-rw-r--r--drivers/gpu/drm/msm/Makefile50
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c9
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c9
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c187
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c99
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h4
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c26
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c52
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c70
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h14
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c)6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c)2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h)2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c (renamed from drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c)4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c)13
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c)60
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h)2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c)5
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c)2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h)10
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c)2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h (renamed from drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_common.xml.h (renamed from drivers/gpu/drm/msm/mdp/mdp_common.xml.h)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_format.c (renamed from drivers/gpu/drm/msm/mdp/mdp_format.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_kms.c (renamed from drivers/gpu/drm/msm/mdp/mdp_kms.c)0
-rw-r--r--drivers/gpu/drm/msm/disp/mdp_kms.h (renamed from drivers/gpu/drm/msm/mdp/mdp_kms.h)0
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c25
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h187
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c19
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c47
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c6
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c10
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c251
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.c5
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll.h9
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c822
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c2
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c5
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c8
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h1
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c6
-rw-r--r--drivers/gpu/drm/msm/msm_fence.h2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c13
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h1
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c9
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c12
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c46
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h1
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c50
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-dvi.c146
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c40
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c40
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c39
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c69
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c37
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c44
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c38
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c42
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c38
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c41
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c49
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c32
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c27
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c2346
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c91
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c1553
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss-of.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c823
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h245
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h11
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c406
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.h4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c418
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c24
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_phy.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_pll.c15
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_wp.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h213
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c53
-rw-r--r--drivers/gpu/drm/omapdrm/dss/pll.c44
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c162
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c453
-rw-r--r--drivers/gpu/drm/omapdrm/dss/video-pll.c19
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c74
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.h4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_priv.h10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c46
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.h22
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c122
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h8
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c18
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c38
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.h9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c41
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.h16
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.c38
-rw-r--r--drivers/gpu/drm/omapdrm/omap_irq.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c16
-rw-r--r--drivers/gpu/drm/omapdrm/tcm-sita.c12
-rw-r--r--drivers/gpu/drm/omapdrm/tcm.h4
-rw-r--r--drivers/gpu/drm/panel/Kconfig9
-rw-r--r--drivers/gpu/drm/panel/Makefile1
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c4
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c2
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c21
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c448
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c82
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_dumb.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c33
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c7
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c74
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c154
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.h18
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c55
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c52
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h1
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_drc.c1
-rw-r--r--drivers/gpu/drm/tegra/dc.c82
-rw-r--r--drivers/gpu/drm/tegra/dc.h1
-rw-r--r--drivers/gpu/drm/tegra/drm.c36
-rw-r--r--drivers/gpu/drm/tegra/drm.h14
-rw-r--r--drivers/gpu/drm/tegra/fb.c25
-rw-r--r--drivers/gpu/drm/tegra/gem.c69
-rw-r--r--drivers/gpu/drm/tegra/gem.h5
-rw-r--r--drivers/gpu/drm/tegra/hub.c125
-rw-r--r--drivers/gpu/drm/tegra/hub.h17
-rw-r--r--drivers/gpu/drm/tegra/plane.c20
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c85
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c25
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h60
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c68
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h1
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/Makefile2
-rw-r--r--drivers/gpu/drm/vmwgfx/device_include/svga_reg.h12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c506
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c57
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c51
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c80
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h64
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c106
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c177
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c13
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c163
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c280
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c8
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c152
-rw-r--r--drivers/pci/pci-driver.c17
-rw-r--r--drivers/pci/pci.c8
-rw-r--r--drivers/pci/quirks.c39
-rw-r--r--include/drm/bridge/analogix_dp.h2
-rw-r--r--include/drm/drm_color_mgmt.h12
-rw-r--r--include/drm/drm_dp_helper.h1
-rw-r--r--include/drm/drm_mode_object.h24
-rw-r--r--include/drm/drm_print.h119
-rw-r--r--include/drm/drm_property.h2
-rw-r--r--include/drm/ttm/ttm_bo_api.h4
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/vga_switcheroo.h6
-rw-r--r--include/sound/hdaudio.h3
-rw-r--r--include/uapi/drm/etnaviv_drm.h6
-rw-r--r--include/uapi/drm/msm_drm.h2
-rw-r--r--scripts/coccinelle/api/drm-get-put.cocci10
-rw-r--r--sound/pci/hda/hda_intel.c36
-rw-r--r--sound/pci/hda/hda_intel.h3
281 files changed, 11500 insertions, 5863 deletions
diff --git a/Documentation/devicetree/bindings/display/connector/dvi-connector.txt b/Documentation/devicetree/bindings/display/connector/dvi-connector.txt
index fc53f7c60bc6..207e42e9eba0 100644
--- a/Documentation/devicetree/bindings/display/connector/dvi-connector.txt
+++ b/Documentation/devicetree/bindings/display/connector/dvi-connector.txt
@@ -10,6 +10,7 @@ Optional properties:
- analog: the connector has DVI analog pins
- digital: the connector has DVI digital pins
- dual-link: the connector has pins for DVI dual-link
+- hpd-gpios: HPD GPIO number
Required nodes:
- Video port for DVI input
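For illustration only, a minimal connector node using the new hpd-gpios property might look like the sketch below; the GPIO controller phandle, pin number and remote endpoint are assumptions, not taken from any real board:

	dvi0: connector {
		compatible = "dvi-connector";
		digital;
		/* hypothetical hot-plug detect pin on gpio0 */
		hpd-gpios = <&gpio0 19 GPIO_ACTIVE_HIGH>;

		port {
			dvi_in: endpoint {
				remote-endpoint = <&tfp410_out>;
			};
		};
	};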
diff --git a/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt b/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt
index 05176f1ae108..8def11b16a24 100644
--- a/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt
+++ b/Documentation/devicetree/bindings/display/etnaviv/etnaviv-drm.txt
@@ -1,23 +1,3 @@
-Etnaviv DRM master device
-=========================
-
-The Etnaviv DRM master device is a virtual device needed to list all
-Vivante GPU cores that comprise the GPU subsystem.
-
-Required properties:
-- compatible: Should be one of
- "fsl,imx-gpu-subsystem"
- "marvell,dove-gpu-subsystem"
-- cores: Should contain a list of phandles pointing to Vivante GPU devices
-
-example:
-
-gpu-subsystem {
- compatible = "fsl,imx-gpu-subsystem";
- cores = <&gpu_2d>, <&gpu_3d>;
-};
-
-
Vivante GPU core devices
========================
@@ -32,7 +12,9 @@ Required properties:
- clocks: should contain one clock for entry in clock-names
see Documentation/devicetree/bindings/clock/clock-bindings.txt
- clock-names:
- - "bus": AXI/register clock
+ - "bus": AXI/master interface clock
+ - "reg": AHB/slave interface clock
+ (only required if GPU can gate slave interface independently)
- "core": GPU core clock
- "shader": Shader clock (only required if GPU has feature PIPE_3D)
diff --git a/Documentation/devicetree/bindings/display/msm/dsi.txt b/Documentation/devicetree/bindings/display/msm/dsi.txt
index a6671bd2c85a..518e9cdf0d4b 100644
--- a/Documentation/devicetree/bindings/display/msm/dsi.txt
+++ b/Documentation/devicetree/bindings/display/msm/dsi.txt
@@ -7,8 +7,6 @@ Required properties:
- reg: Physical base address and length of the registers of controller
- reg-names: The names of register regions. The following regions are required:
* "dsi_ctrl"
-- qcom,dsi-host-index: The ID of DSI controller hardware instance. This should
- be 0 or 1, since we have 2 DSI controllers at most for now.
- interrupts: The interrupt signal from the DSI block.
- power-domains: Should be <&mmcc MDSS_GDSC>.
- clocks: Phandles to device clocks.
@@ -22,6 +20,8 @@ Required properties:
* "core"
For DSIv2, we need an additional clock:
* "src"
+ For DSI6G v2.0 onwards, we also need the clock:
+ * "byte_intf"
- assigned-clocks: Parents of "byte" and "pixel" for the given platform.
- assigned-clock-parents: The Byte clock and Pixel clock PLL outputs provided
by a DSI PHY block. See [1] for details on clock bindings.
@@ -88,21 +88,35 @@ Required properties:
* "qcom,dsi-phy-28nm-lp"
* "qcom,dsi-phy-20nm"
* "qcom,dsi-phy-28nm-8960"
-- reg: Physical base address and length of the registers of PLL, PHY and PHY
- regulator
+ * "qcom,dsi-phy-14nm"
+ * "qcom,dsi-phy-10nm"
+- reg: Physical base address and length of the registers of PLL, PHY. Some
+ revisions require the PHY regulator base address, whereas others require the
+ PHY lane base address. See below for each PHY revision.
- reg-names: The names of register regions. The following regions are required:
+ For DSI 28nm HPM/LP/8960 PHYs and 20nm PHY:
* "dsi_pll"
* "dsi_phy"
* "dsi_phy_regulator"
+ For DSI 14nm and 10nm PHYs:
+ * "dsi_pll"
+ * "dsi_phy"
+ * "dsi_phy_lane"
- #clock-cells: Must be 1. The DSI PHY block acts as a clock provider, creating
2 clocks: A byte clock (index 0), and a pixel clock (index 1).
-- qcom,dsi-phy-index: The ID of DSI PHY hardware instance. This should
- be 0 or 1, since we have 2 DSI PHYs at most for now.
- power-domains: Should be <&mmcc MDSS_GDSC>.
- clocks: Phandles to device clocks. See [1] for details on clock bindings.
- clock-names: the following clocks are required:
* "iface"
+ For 28nm HPM/LP, 28nm 8960 PHYs:
+- vddio-supply: phandle to vdd-io regulator device node
+ For 20nm PHY:
- vddio-supply: phandle to vdd-io regulator device node
+- vcca-supply: phandle to vcca regulator device node
+ For 14nm PHY:
+- vcca-supply: phandle to vcca regulator device node
+ For 10nm PHY:
+- vdds-supply: phandle to vdds regulator device node
Optional properties:
- qcom,dsi-phy-regulator-ldo-mode: Boolean value indicating if the LDO mode PHY
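An illustrative node for one of the newer PHY revisions, matching the register regions and supply listed above; the base addresses, sizes and regulator/clock specifiers are assumptions:

	dsi_phy0: dsi-phy@ae94400 {
		compatible = "qcom,dsi-phy-10nm";
		reg = <0x0ae94400 0x200>,
		      <0x0ae94600 0x280>,
		      <0x0ae94a00 0x1e0>;
		reg-names = "dsi_phy", "dsi_phy_lane", "dsi_pll";
		#clock-cells = <1>;
		power-domains = <&mmcc MDSS_GDSC>;
		clocks = <&dispcc 0>;	/* placeholder "iface" clock specifier */
		clock-names = "iface";
		vdds-supply = <&vreg_dsi_phy>;
	};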
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g104sn02.txt b/Documentation/devicetree/bindings/display/panel/auo,g104sn02.txt
new file mode 100644
index 000000000000..85626edf63e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/auo,g104sn02.txt
@@ -0,0 +1,12 @@
+AU Optronics Corporation 10.4" (800x600) color TFT LCD panel
+
+Required properties:
+- compatible: should be "auo,g104sn02"
+- power-supply: as specified in the base binding
+
+Optional properties:
+- backlight: as specified in the base binding
+- enable-gpios: as specified in the base binding
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
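A minimal sketch of a panel node for this binding; the regulator, backlight and GPIO phandles are placeholders:

	panel {
		compatible = "auo,g104sn02";
		power-supply = <&panel_vcc_reg>;
		backlight = <&backlight>;
		enable-gpios = <&gpio2 5 GPIO_ACTIVE_HIGH>;
	};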
diff --git a/Documentation/devicetree/bindings/display/panel/display-timing.txt b/Documentation/devicetree/bindings/display/panel/display-timing.txt
index 58fa3e48481d..78222ced1874 100644
--- a/Documentation/devicetree/bindings/display/panel/display-timing.txt
+++ b/Documentation/devicetree/bindings/display/panel/display-timing.txt
@@ -80,6 +80,11 @@ The parameters are defined as:
| | v | | |
+----------+-------------------------------------+----------+-------+
+Note: In addition to being used as subnode(s) of display-timings, the timing
+ subnode may also be used on its own. This is appropriate if only one mode
+ need be conveyed. In this case, the node should be named 'panel-timing'.
+
+
Example:
display-timings {
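To illustrate the standalone form described in the note above, a panel node can carry a single panel-timing subnode; the compatible string and timing values below are placeholders, not real panel data:

	panel {
		compatible = "panel-dpi";

		panel-timing {
			clock-frequency = <33500000>;
			hactive = <800>;
			vactive = <480>;
			hfront-porch = <40>;
			hback-porch = <88>;
			hsync-len = <128>;
			vfront-porch = <13>;
			vback-porch = <32>;
			vsync-len = <3>;
		};
	};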
diff --git a/Documentation/devicetree/bindings/display/panel/koe,tx31d200vm0baa.txt b/Documentation/devicetree/bindings/display/panel/koe,tx31d200vm0baa.txt
new file mode 100644
index 000000000000..6a036ede3e28
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/koe,tx31d200vm0baa.txt
@@ -0,0 +1,25 @@
+Kaohsiung Opto-Electronics. TX31D200VM0BAA 12.3" HSXGA LVDS panel
+
+This binding is compatible with the simple-panel binding, which is specified
+in simple-panel.txt in this directory.
+
+Required properties:
+- compatible: should be "koe,tx31d200vm0baa"
+
+Optional properties:
+- backlight: phandle of the backlight device attached to the panel
+
+Optional nodes:
+- Video port for LVDS panel input.
+
+Example:
+ panel {
+ compatible = "koe,tx31d200vm0baa";
+ backlight = <&backlight_lvds>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lvds0_out>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt b/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt
index 6862028e7b2e..203b03eefb68 100644
--- a/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt
+++ b/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt
@@ -9,6 +9,7 @@ Required properties:
Optional properties:
- reset-gpios: a GPIO spec for the reset pin (active low).
+ - power-supply: phandle of the regulator that provides the supply voltage.
Example:
&dsi {
@@ -17,5 +18,6 @@ Example:
compatible = "orisetech,otm8009a";
reg = <0>;
reset-gpios = <&gpioh 7 GPIO_ACTIVE_LOW>;
+ power-supply = <&v1v8>;
};
};
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt b/Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt
new file mode 100644
index 000000000000..cbb79ef3bfc9
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt
@@ -0,0 +1,25 @@
+Raydium Semiconductor Corporation RM68200 5.5" 720p MIPI-DSI TFT LCD panel
+
+The Raydium Semiconductor Corporation RM68200 is a 5.5" 720x1280 TFT LCD
+panel connected using a MIPI-DSI video interface.
+
+Required properties:
+ - compatible: "raydium,rm68200"
+ - reg: the virtual channel number of a DSI peripheral
+
+Optional properties:
+ - reset-gpios: a GPIO spec for the reset pin (active low).
+ - power-supply: phandle of the regulator that provides the supply voltage.
+ - backlight: phandle of the backlight device attached to the panel.
+
+Example:
+&dsi {
+ ...
+ panel@0 {
+ compatible = "raydium,rm68200";
+ reg = <0>;
+ reset-gpios = <&gpiof 15 GPIO_ACTIVE_LOW>;
+ power-supply = <&v1v8>;
+ backlight = <&pwm_backlight>;
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/panel/simple-panel.txt b/Documentation/devicetree/bindings/display/panel/simple-panel.txt
index 16d8ff088b7d..45a457ad38f0 100644
--- a/Documentation/devicetree/bindings/display/panel/simple-panel.txt
+++ b/Documentation/devicetree/bindings/display/panel/simple-panel.txt
@@ -1,4 +1,8 @@
Simple display panel
+====================
+
+panel node
+----------
Required properties:
- power-supply: See panel-common.txt
diff --git a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
index 8bdef4920edc..3346c1e2a7a0 100644
--- a/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
+++ b/Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
@@ -146,13 +146,16 @@ Required properties:
* allwinner,sun8i-a83t-tcon-lcd
* allwinner,sun8i-a83t-tcon-tv
* allwinner,sun8i-v3s-tcon
+ * allwinner,sun9i-a80-tcon-lcd
+ * allwinner,sun9i-a80-tcon-tv
- reg: base address and size of memory-mapped region
- interrupts: interrupt associated to this IP
- clocks: phandles to the clocks feeding the TCON.
- 'ahb': the interface clocks
- - 'tcon-ch0': The clock driving the TCON channel 0, except for A83T TV TCON
+ - 'tcon-ch0': The clock driving the TCON channel 0, if supported
- resets: phandles to the reset controllers driving the encoder
- - "lcd": the reset line for the TCON channel 0
+ - "lcd": the reset line for the TCON
+ - "edp": the reset line for the eDP block (A80 only)
- clock-names: the clock names mentioned above
- reset-names: the reset names mentioned above
@@ -171,7 +174,9 @@ Required properties:
channel the endpoint is associated to. If that property is not
present, the endpoint number will be used as the channel number.
-On SoCs other than the A33 and V3s, there is one more clock required:
+For TCONs with channel 0, there is one more clock required:
+ - 'tcon-ch0': The clock driving the TCON channel 0
+For TCONs with channel 1, there is one more clock required:
- 'tcon-ch1': The clock driving the TCON channel 1
When TCON support LVDS (all TCONs except TV TCON on A83T and those found
@@ -186,7 +191,7 @@ DRC
---
The DRC (Dynamic Range Controller), found in the latest Allwinner SoCs
-(A31, A23, A33), allows to dynamically adjust pixel
+(A31, A23, A33, A80), allows to dynamically adjust pixel
brightness/contrast based on histogram measurements for LCD content
adaptive backlight control.
@@ -196,6 +201,7 @@ Required properties:
* allwinner,sun6i-a31-drc
* allwinner,sun6i-a31s-drc
* allwinner,sun8i-a33-drc
+ * allwinner,sun9i-a80-drc
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
- clocks: phandles to the clocks feeding the DRC
@@ -222,6 +228,7 @@ Required properties:
* allwinner,sun6i-a31-display-backend
* allwinner,sun7i-a20-display-backend
* allwinner,sun8i-a33-display-backend
+ * allwinner,sun9i-a80-display-backend
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
- clocks: phandles to the clocks feeding the frontend and backend
@@ -243,6 +250,28 @@ On the A33, some additional properties are required:
- resets and reset-names need to have a phandle to the SAT bus
resets, whose name will be "sat"
+DEU
+---
+
+The DEU (Detail Enhancement Unit), found in the Allwinner A80 SoC,
+can sharpen the display content in both luma and chroma channels.
+
+Required properties:
+ - compatible: value must be one of:
+ * allwinner,sun9i-a80-deu
+ - reg: base address and size of the memory-mapped region.
+ - interrupts: interrupt associated to this IP
+ - clocks: phandles to the clocks feeding the DEU
+ * ahb: the DEU interface clock
+ * mod: the DEU module clock
+ * ram: the DEU DRAM clock
+ - clock-names: the clock names mentioned above
+ - resets: phandles to the reset line driving the DEU
+
+- ports: A ports node with endpoint definitions as defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt. The
+ first port should be the input endpoints, the second one the outputs
+
Display Engine Frontend
-----------------------
@@ -256,6 +285,7 @@ Required properties:
* allwinner,sun6i-a31-display-frontend
* allwinner,sun7i-a20-display-frontend
* allwinner,sun8i-a33-display-frontend
+ * allwinner,sun9i-a80-display-frontend
- reg: base address and size of the memory-mapped region.
- interrupts: interrupt associated to this IP
- clocks: phandles to the clocks feeding the frontend and backend
@@ -312,6 +342,7 @@ Required properties:
* allwinner,sun8i-a83t-display-engine
* allwinner,sun8i-h3-display-engine
* allwinner,sun8i-v3s-display-engine
+ * allwinner,sun9i-a80-display-engine
- allwinner,pipelines: list of phandle to the display engine
frontends (DE 1.0) or mixers (DE 2.0) available.
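As a sketch of a DEU node following the description above; the register range, interrupt, clock/reset indices and remote endpoints are placeholders:

	deu0: deu@3300000 {
		compatible = "allwinner,sun9i-a80-deu";
		reg = <0x03300000 0x40000>;
		interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&de_clocks 0>, <&de_clocks 1>, <&de_clocks 2>;
		clock-names = "ahb", "mod", "ram";
		resets = <&de_clocks 4>;

		ports {
			#address-cells = <1>;
			#size-cells = <0>;

			/* first port is the input, second the output */
			port@0 {
				reg = <0>;
				deu0_in: endpoint {
					remote-endpoint = <&fe0_out>;
				};
			};

			port@1 {
				reg = <1>;
				deu0_out: endpoint {
					remote-endpoint = <&be0_in>;
				};
			};
		};
	};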
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index ae850d6c0ad3..12e8b3e576b0 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -104,6 +104,7 @@ eeti eGalax_eMPIA Technology Inc
elan Elan Microelectronic Corp.
embest Shenzhen Embest Technology Co., Ltd.
emmicro EM Microelectronic
+emtrion emtrion GmbH
energymicro Silicon Laboratories (formerly Energy Micro AS)
engicam Engicam S.r.l.
epcos EPCOS AG
diff --git a/Documentation/gpu/drivers.rst b/Documentation/gpu/drivers.rst
new file mode 100644
index 000000000000..e8c84419a2a1
--- /dev/null
+++ b/Documentation/gpu/drivers.rst
@@ -0,0 +1,21 @@
+========================
+GPU Driver Documentation
+========================
+
+.. toctree::
+
+ i915
+ meson
+ pl111
+ tegra
+ tinydrm
+ tve200
+ vc4
+ bridge/dw-hdmi
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/gpu/drm-kms.rst b/Documentation/gpu/drm-kms.rst
index 56a3780e39b8..1dffd1ac4cd4 100644
--- a/Documentation/gpu/drm-kms.rst
+++ b/Documentation/gpu/drm-kms.rst
@@ -286,6 +286,9 @@ Atomic Mode Setting Function Reference
.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
:export:
+.. kernel-doc:: drivers/gpu/drm/drm_atomic.c
+ :internal:
+
CRTC Abstraction
================
diff --git a/Documentation/gpu/index.rst b/Documentation/gpu/index.rst
index c36586dad29d..00288f34c5a6 100644
--- a/Documentation/gpu/index.rst
+++ b/Documentation/gpu/index.rst
@@ -10,16 +10,9 @@ Linux GPU Driver Developer's Guide
drm-kms
drm-kms-helpers
drm-uapi
- i915
- meson
- pl111
- tegra
- tinydrm
- tve200
- vc4
+ drivers
vga-switcheroo
vgaarbiter
- bridge/dw-hdmi
todo
.. only:: subproject and html
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 1bfce79bc074..7379aa5a6849 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -744,7 +744,6 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
ret = amdgpu_device_suspend(drm_dev, false, false);
pci_save_state(pdev);
@@ -781,7 +780,6 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
ret = amdgpu_device_resume(drm_dev, false, false);
drm_kms_helper_poll_enable(drm_dev);
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
return 0;
}
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 904fff80917b..fcc62bc60f6a 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -288,8 +288,14 @@ static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc,
s->enhancer_enable = ((h_upscale_factor >> 16) >= 2 ||
(v_upscale_factor >> 16) >= 2);
- s->input_w = pstate->src_w >> 16;
- s->input_h = pstate->src_h >> 16;
+ if (pstate->rotation & MALIDP_ROTATED_MASK) {
+ s->input_w = pstate->src_h >> 16;
+ s->input_h = pstate->src_w >> 16;
+ } else {
+ s->input_w = pstate->src_w >> 16;
+ s->input_h = pstate->src_h >> 16;
+ }
+
s->output_w = pstate->crtc_w;
s->output_h = pstate->crtc_h;
@@ -525,14 +531,13 @@ int malidp_crtc_init(struct drm_device *drm)
if (!primary) {
DRM_ERROR("no primary plane found\n");
- ret = -EINVAL;
- goto crtc_cleanup_planes;
+ return -EINVAL;
}
ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL,
&malidp_crtc_funcs, NULL);
if (ret)
- goto crtc_cleanup_planes;
+ return ret;
drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs);
drm_mode_crtc_set_gamma_size(&malidp->crtc, MALIDP_GAMMA_LUT_SIZE);
@@ -542,9 +547,4 @@ int malidp_crtc_init(struct drm_device *drm)
malidp_se_set_enh_coeffs(malidp->dev);
return 0;
-
-crtc_cleanup_planes:
- malidp_de_planes_destroy(drm);
-
- return ret;
}
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 3d82712d8002..8d20faa198cf 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -185,25 +185,29 @@ static int malidp_set_and_wait_config_valid(struct drm_device *drm)
static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
{
- struct drm_pending_vblank_event *event;
struct drm_device *drm = state->dev;
struct malidp_drm *malidp = drm->dev_private;
- if (malidp->crtc.enabled) {
- /* only set config_valid if the CRTC is enabled */
- if (malidp_set_and_wait_config_valid(drm))
- DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
- }
+ malidp->event = malidp->crtc.state->event;
+ malidp->crtc.state->event = NULL;
- event = malidp->crtc.state->event;
- if (event) {
- malidp->crtc.state->event = NULL;
+ if (malidp->crtc.state->active) {
+ /*
+ * if we have an event to deliver to userspace, make sure
+ * the vblank is enabled as we are sending it from the IRQ
+ * handler.
+ */
+ if (malidp->event)
+ drm_crtc_vblank_get(&malidp->crtc);
+ /* only set config_valid if the CRTC is enabled */
+ if (malidp_set_and_wait_config_valid(drm) < 0)
+ DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
+ } else if (malidp->event) {
+ /* CRTC inactive means vblank IRQ is disabled, send event directly */
spin_lock_irq(&drm->event_lock);
- if (drm_crtc_vblank_get(&malidp->crtc) == 0)
- drm_crtc_arm_vblank_event(&malidp->crtc, event);
- else
- drm_crtc_send_vblank_event(&malidp->crtc, event);
+ drm_crtc_send_vblank_event(&malidp->crtc, malidp->event);
+ malidp->event = NULL;
spin_unlock_irq(&drm->event_lock);
}
drm_atomic_helper_commit_hw_done(state);
@@ -232,8 +236,6 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
malidp_atomic_commit_hw_done(state);
- drm_atomic_helper_wait_for_vblanks(drm, state);
-
pm_runtime_put(drm->dev);
drm_atomic_helper_cleanup_planes(drm, state);
@@ -276,7 +278,7 @@ static int malidp_init(struct drm_device *drm)
static void malidp_fini(struct drm_device *drm)
{
- malidp_de_planes_destroy(drm);
+ drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
}
@@ -312,13 +314,26 @@ static int malidp_irq_init(struct platform_device *pdev)
DEFINE_DRM_GEM_CMA_FOPS(fops);
+static int malidp_dumb_create(struct drm_file *file_priv,
+ struct drm_device *drm,
+ struct drm_mode_create_dumb *args)
+{
+ struct malidp_drm *malidp = drm->dev_private;
+ /* allocate for the worst case scenario, i.e. rotated buffers */
+ u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 1);
+
+ args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), alignment);
+
+ return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
+}
+
static struct drm_driver malidp_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
DRIVER_PRIME,
.lastclose = drm_fb_helper_lastclose,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_create = malidp_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
@@ -662,8 +677,10 @@ static void malidp_unbind(struct device *dev)
drm_fb_cma_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
pm_runtime_get_sync(dev);
+ drm_crtc_vblank_off(&malidp->crtc);
malidp_se_irq_fini(drm);
malidp_de_irq_fini(drm);
+ drm->irq_enabled = false;
component_unbind_all(dev, drm);
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index e0d12c9fc6b8..c70989b93387 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -22,6 +22,7 @@ struct malidp_drm {
struct malidp_hw_device *dev;
struct drm_crtc crtc;
wait_queue_head_t wq;
+ struct drm_pending_vblank_event *event;
atomic_t config_valid;
u32 core_id;
};
@@ -59,7 +60,6 @@ struct malidp_crtc_state {
#define to_malidp_crtc_state(x) container_of(x, struct malidp_crtc_state, base)
int malidp_de_planes_init(struct drm_device *drm);
-void malidp_de_planes_destroy(struct drm_device *drm);
int malidp_crtc_init(struct drm_device *drm);
/* often used combination of rotational bits */
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 2bfb542135ac..d789b46dc817 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -75,16 +75,16 @@ static const struct malidp_format_id malidp550_de_formats[] = {
};
static const struct malidp_layer malidp500_layers[] = {
- { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
- { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE, MALIDP_DE_LG_STRIDE },
- { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE, MALIDP_DE_LG_STRIDE },
+ { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE, MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB },
+ { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE, MALIDP_DE_LG_STRIDE, 0 },
+ { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE, MALIDP_DE_LG_STRIDE, 0 },
};
static const struct malidp_layer malidp550_layers[] = {
- { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
- { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE },
- { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
- { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
+ { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB },
+ { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE, 0 },
+ { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB },
+ { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE, 0 },
};
#define SE_N_SCALING_COEFFS 96
@@ -782,9 +782,15 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
/* first handle the config valid IRQ */
dc_status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
if (dc_status & hw->map.dc_irq_map.vsync_irq) {
- /* we have a page flip event */
- atomic_set(&malidp->config_valid, 1);
malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status);
+ /* do we have a page flip event? */
+ if (malidp->event != NULL) {
+ spin_lock(&drm->event_lock);
+ drm_crtc_send_vblank_event(&malidp->crtc, malidp->event);
+ malidp->event = NULL;
+ spin_unlock(&drm->event_lock);
+ }
+ atomic_set(&malidp->config_valid, 1);
ret = IRQ_WAKE_THREAD;
}
@@ -794,7 +800,7 @@ static irqreturn_t malidp_de_irq(int irq, void *arg)
mask = malidp_hw_read(hwdev, MALIDP_REG_MASKIRQ);
status &= mask;
- if (status & de->vsync_irq)
+ if ((status & de->vsync_irq) && malidp->crtc.enabled)
drm_crtc_handle_vblank(&malidp->crtc);
malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, status);
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index b0690ebb3565..b5dd6c73ec9f 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -58,7 +58,8 @@ struct malidp_layer {
u16 id; /* layer ID */
u16 base; /* address offset for the register bank */
u16 ptr; /* address offset for the pointer register */
- u16 stride_offset; /* Offset to the first stride register. */
+ u16 stride_offset; /* offset to the first stride register. */
+ s16 yuv2rgb_offset; /* offset to the YUV->RGB matrix entries */
};
enum malidp_scaling_coeff_set {
@@ -285,10 +286,16 @@ void malidp_se_irq_fini(struct drm_device *drm);
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
u8 layer_id, u32 format);
-static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
- unsigned int pitch)
+static inline u8 malidp_hw_get_pitch_align(struct malidp_hw_device *hwdev, bool rotated)
{
- return !(pitch & (hwdev->hw->map.bus_align_bytes - 1));
+ /*
+ * only hardware that cannot do 8-byte bus alignment has further
+ * constraints on rotated planes
+ */
+ if (hwdev->hw->map.bus_align_bytes == 8)
+ return 8;
+ else
+ return hwdev->hw->map.bus_align_bytes << (rotated ? 2 : 0);
}
/* U16.16 */
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index ee32361c87ac..7a44897c50fe 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -35,6 +35,9 @@
#define LAYER_COMP_MASK (0x3 << 12)
#define LAYER_COMP_PIXEL (0x3 << 12)
#define LAYER_COMP_PLANE (0x2 << 12)
+#define LAYER_ALPHA_OFFSET (16)
+#define LAYER_ALPHA_MASK (0xff)
+#define LAYER_ALPHA(x) (((x) & LAYER_ALPHA_MASK) << LAYER_ALPHA_OFFSET)
#define MALIDP_LAYER_COMPOSE 0x008
#define MALIDP_LAYER_SIZE 0x00c
#define LAYER_H_VAL(x) (((x) & 0x1fff) << 0)
@@ -56,12 +59,8 @@ static void malidp_de_plane_destroy(struct drm_plane *plane)
{
struct malidp_plane *mp = to_malidp_plane(plane);
- if (mp->base.fb)
- drm_framebuffer_put(mp->base.fb);
-
- drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
- devm_kfree(plane->dev->dev, mp);
+ kfree(mp);
}
/*
@@ -147,13 +146,21 @@ static int malidp_se_check_scaling(struct malidp_plane *mp,
if (!crtc_state)
return -EINVAL;
+ mc = to_malidp_crtc_state(crtc_state);
+
ret = drm_atomic_helper_check_plane_state(state, crtc_state,
0, INT_MAX, true, true);
if (ret)
return ret;
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
+ if (state->rotation & MALIDP_ROTATED_MASK) {
+ src_w = state->src_h >> 16;
+ src_h = state->src_w >> 16;
+ } else {
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+ }
+
if ((state->crtc_w == src_w) && (state->crtc_h == src_h)) {
/* Scaling not necessary for this plane. */
mc->scaled_planes_mask &= ~(mp->layer->id);
@@ -163,8 +170,6 @@ static int malidp_se_check_scaling(struct malidp_plane *mp,
if (mp->layer->id & (DE_SMART | DE_GRAPHICS2))
return -EINVAL;
- mc = to_malidp_crtc_state(crtc_state);
-
mc->scaled_planes_mask |= mp->layer->id;
/* Defer scaling requirements calculation to the crtc check. */
return 0;
@@ -175,6 +180,7 @@ static int malidp_de_plane_check(struct drm_plane *plane,
{
struct malidp_plane *mp = to_malidp_plane(plane);
struct malidp_plane_state *ms = to_malidp_plane_state(state);
+ bool rotated = state->rotation & MALIDP_ROTATED_MASK;
struct drm_framebuffer *fb;
int i, ret;
@@ -191,7 +197,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
ms->n_planes = fb->format->num_planes;
for (i = 0; i < ms->n_planes; i++) {
- if (!malidp_hw_pitch_valid(mp->hwdev, fb->pitches[i])) {
+ u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated);
+ if (fb->pitches[i] & (alignment - 1)) {
DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
fb->pitches[i], i);
return -EINVAL;
@@ -259,6 +266,60 @@ static void malidp_de_set_plane_pitches(struct malidp_plane *mp,
mp->layer->stride_offset + i * 4);
}
+static const s16
+malidp_yuv2rgb_coeffs[][DRM_COLOR_RANGE_MAX][MALIDP_COLORADJ_NUM_COEFFS] = {
+ [DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ 1192, 0, 1634,
+ 1192, -401, -832,
+ 1192, 2066, 0,
+ 64, 512, 512
+ },
+ [DRM_COLOR_YCBCR_BT601][DRM_COLOR_YCBCR_FULL_RANGE] = {
+ 1024, 0, 1436,
+ 1024, -352, -731,
+ 1024, 1815, 0,
+ 0, 512, 512
+ },
+ [DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ 1192, 0, 1836,
+ 1192, -218, -546,
+ 1192, 2163, 0,
+ 64, 512, 512
+ },
+ [DRM_COLOR_YCBCR_BT709][DRM_COLOR_YCBCR_FULL_RANGE] = {
+ 1024, 0, 1613,
+ 1024, -192, -479,
+ 1024, 1900, 0,
+ 0, 512, 512
+ },
+ [DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_LIMITED_RANGE] = {
+ 1024, 0, 1476,
+ 1024, -165, -572,
+ 1024, 1884, 0,
+ 0, 512, 512
+ },
+ [DRM_COLOR_YCBCR_BT2020][DRM_COLOR_YCBCR_FULL_RANGE] = {
+ 1024, 0, 1510,
+ 1024, -168, -585,
+ 1024, 1927, 0,
+ 0, 512, 512
+ }
+};
+
+static void malidp_de_set_color_encoding(struct malidp_plane *plane,
+ enum drm_color_encoding enc,
+ enum drm_color_range range)
+{
+ unsigned int i;
+
+ for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+ /* coefficients are signed, two's complement values */
+ malidp_hw_write(plane->hwdev, malidp_yuv2rgb_coeffs[enc][range][i],
+ plane->layer->base + plane->layer->yuv2rgb_offset +
+ i * 4);
+ }
+}
+
static void malidp_de_plane_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -266,6 +327,7 @@ static void malidp_de_plane_update(struct drm_plane *plane,
struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
u32 src_w, src_h, dest_w, dest_h, val;
int i;
+ bool format_has_alpha = plane->state->fb->format->has_alpha;
mp = to_malidp_plane(plane);
@@ -289,6 +351,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
malidp_de_set_plane_pitches(mp, ms->n_planes,
plane->state->fb->pitches);
+ if ((plane->state->color_encoding != old_state->color_encoding) ||
+ (plane->state->color_range != old_state->color_range))
+ malidp_de_set_color_encoding(mp, plane->state->color_encoding,
+ plane->state->color_range);
+
malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
mp->layer->base + MALIDP_LAYER_SIZE);
@@ -317,12 +384,25 @@ static void malidp_de_plane_update(struct drm_plane *plane,
if (plane->state->rotation & DRM_MODE_REFLECT_Y)
val |= LAYER_V_FLIP;
- /*
- * always enable pixel alpha blending until we have a way to change
- * blend modes
- */
val &= ~LAYER_COMP_MASK;
- val |= LAYER_COMP_PIXEL;
+ if (format_has_alpha) {
+
+ /*
+ * always enable pixel alpha blending until we have a way
+ * to change blend modes
+ */
+ val |= LAYER_COMP_PIXEL;
+ } else {
+
+ /*
+ * do not enable pixel alpha blending as the color channel
+ * does not have any alpha information
+ */
+ val |= LAYER_COMP_PLANE;
+
+ /* Set layer alpha coefficient to 0xff, i.e. fully opaque */
+ val |= LAYER_ALPHA(0xff);
+ }
val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK);
if (plane->state->crtc) {
@@ -417,6 +497,26 @@ int malidp_de_planes_init(struct drm_device *drm)
drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0, flags);
malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
plane->layer->base + MALIDP_LAYER_COMPOSE);
+
+ /* Attach the YUV->RGB property only to video layers */
+ if (id & (DE_VIDEO1 | DE_VIDEO2)) {
+ /* default encoding for YUV->RGB is BT601 NARROW */
+ enum drm_color_encoding enc = DRM_COLOR_YCBCR_BT601;
+ enum drm_color_range range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+
+ ret = drm_plane_create_color_properties(&plane->base,
+ BIT(DRM_COLOR_YCBCR_BT601) | \
+ BIT(DRM_COLOR_YCBCR_BT709) | \
+ BIT(DRM_COLOR_YCBCR_BT2020),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | \
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ enc, range);
+ if (!ret)
+ /* program the HW registers */
+ malidp_de_set_color_encoding(plane, enc, range);
+ else
+ DRM_WARN("Failed to create video layer %d color properties\n", id);
+ }
}
kfree(formats);
@@ -424,18 +524,7 @@ int malidp_de_planes_init(struct drm_device *drm)
return 0;
cleanup:
- malidp_de_planes_destroy(drm);
kfree(formats);
return ret;
}
-
-void malidp_de_planes_destroy(struct drm_device *drm)
-{
- struct drm_plane *p, *pt;
-
- list_for_each_entry_safe(p, pt, &drm->mode_config.plane_list, head) {
- drm_plane_cleanup(p);
- kfree(p);
- }
-}
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 2039f857f77d..149024fb4432 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -170,10 +170,7 @@
#define MALIDP500_CONFIG_3D 0x00038
#define MALIDP500_BGND_COLOR 0x0003c
#define MALIDP500_OUTPUT_DEPTH 0x00044
-#define MALIDP500_YUV_RGB_COEF 0x00048
-#define MALIDP500_COLOR_ADJ_COEF 0x00078
-#define MALIDP500_COEF_TABLE_ADDR 0x000a8
-#define MALIDP500_COEF_TABLE_DATA 0x000ac
+#define MALIDP500_COEFFS_BASE 0x00078
/*
* The YUV2RGB coefficients on the DP500 are not in the video layer's register
@@ -181,11 +178,6 @@
* the negative offset.
*/
#define MALIDP500_LV_YUV2RGB ((s16)(-0xB8))
-/*
- * To match DP550/650, the start of the coeffs registers is
- * at COLORADJ_COEFF0 instead of at YUV_RGB_COEF1.
- */
-#define MALIDP500_COEFFS_BASE 0x00078
#define MALIDP500_DE_LV_BASE 0x00100
#define MALIDP500_DE_LV_PTR_BASE 0x00124
#define MALIDP500_DE_LG1_BASE 0x00200
@@ -213,6 +205,7 @@
#define MALIDP550_DE_BGND_COLOR 0x00044
#define MALIDP550_DE_OUTPUT_DEPTH 0x0004c
#define MALIDP550_COEFFS_BASE 0x00050
+#define MALIDP550_LV_YUV2RGB 0x00084
#define MALIDP550_DE_LV1_BASE 0x00100
#define MALIDP550_DE_LV1_PTR_BASE 0x00124
#define MALIDP550_DE_LV2_BASE 0x00200
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index a693ab3078f0..5c52307146c7 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
@@ -35,6 +36,8 @@
#define to_dp(nm) container_of(nm, struct analogix_dp_device, nm)
+static const bool verify_fast_training;
+
struct bridge_init {
struct i2c_client *client;
struct device_node *node;
@@ -98,18 +101,18 @@ static int analogix_dp_detect_hpd(struct analogix_dp_device *dp)
return 0;
}
-int analogix_dp_psr_supported(struct analogix_dp_device *dp)
+int analogix_dp_psr_enabled(struct analogix_dp_device *dp)
{
- return dp->psr_support;
+ return dp->psr_enable;
}
-EXPORT_SYMBOL_GPL(analogix_dp_psr_supported);
+EXPORT_SYMBOL_GPL(analogix_dp_psr_enabled);
int analogix_dp_enable_psr(struct analogix_dp_device *dp)
{
struct edp_vsc_psr psr_vsc;
- if (!dp->psr_support)
+ if (!dp->psr_enable)
return 0;
/* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
@@ -122,8 +125,7 @@ int analogix_dp_enable_psr(struct analogix_dp_device *dp)
psr_vsc.DB0 = 0;
psr_vsc.DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
- analogix_dp_send_psr_spd(dp, &psr_vsc);
- return 0;
+ return analogix_dp_send_psr_spd(dp, &psr_vsc, true);
}
EXPORT_SYMBOL_GPL(analogix_dp_enable_psr);
@@ -132,7 +134,7 @@ int analogix_dp_disable_psr(struct analogix_dp_device *dp)
struct edp_vsc_psr psr_vsc;
int ret;
- if (!dp->psr_support)
+ if (!dp->psr_enable)
return 0;
/* Prepare VSC packet as per EDP 1.4 spec, Table 6.9 */
@@ -149,8 +151,7 @@ int analogix_dp_disable_psr(struct analogix_dp_device *dp)
if (ret != 1)
dev_err(dp->dev, "Failed to set DP Power0 %d\n", ret);
- analogix_dp_send_psr_spd(dp, &psr_vsc);
- return 0;
+ return analogix_dp_send_psr_spd(dp, &psr_vsc, false);
}
EXPORT_SYMBOL_GPL(analogix_dp_disable_psr);
@@ -530,7 +531,7 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
{
int lane, lane_count, retval;
u32 reg;
- u8 link_align, link_status[2], adjust_request[2];
+ u8 link_align, link_status[2], adjust_request[2], spread;
usleep_range(400, 401);
@@ -573,6 +574,20 @@ static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp)
dev_dbg(dp->dev, "final lane count = %.2x\n",
dp->link_train.lane_count);
+ retval = drm_dp_dpcd_readb(&dp->aux, DP_MAX_DOWNSPREAD,
+ &spread);
+ if (retval != 1) {
+ dev_err(dp->dev, "failed to read downspread %d\n",
+ retval);
+ dp->fast_train_support = false;
+ } else {
+ dp->fast_train_support =
+ (spread & DP_NO_AUX_HANDSHAKE_LINK_TRAINING) ?
+ true : false;
+ }
+ dev_dbg(dp->dev, "fast link training %s\n",
+ dp->fast_train_support ? "supported" : "unsupported");
+
/* set enhanced mode if available */
analogix_dp_set_enhanced_mode(dp);
dp->link_train.lt_state = FINISHED;
@@ -629,10 +644,12 @@ static void analogix_dp_get_max_rx_lane_count(struct analogix_dp_device *dp,
*lane_count = DPCD_MAX_LANE_COUNT(data);
}
-static void analogix_dp_init_training(struct analogix_dp_device *dp,
- enum link_lane_count_type max_lane,
- int max_rate)
+static int analogix_dp_full_link_train(struct analogix_dp_device *dp,
+ u32 max_lanes, u32 max_rate)
{
+ int retval = 0;
+ bool training_finished = false;
+
/*
* MACRO_RST must be applied after the PLL_LOCK to avoid
* the DP inter pair skew issue for at least 10 us
@@ -658,18 +675,13 @@ static void analogix_dp_init_training(struct analogix_dp_device *dp,
}
/* Setup TX lane count & rate */
- if (dp->link_train.lane_count > max_lane)
- dp->link_train.lane_count = max_lane;
+ if (dp->link_train.lane_count > max_lanes)
+ dp->link_train.lane_count = max_lanes;
if (dp->link_train.link_rate > max_rate)
dp->link_train.link_rate = max_rate;
/* All DP analog module power up */
analogix_dp_set_analog_power_down(dp, POWER_ALL, 0);
-}
-
-static int analogix_dp_sw_link_training(struct analogix_dp_device *dp)
-{
- int retval = 0, training_finished = 0;
dp->link_train.lt_state = START;
@@ -704,22 +716,88 @@ static int analogix_dp_sw_link_training(struct analogix_dp_device *dp)
return retval;
}
-static int analogix_dp_set_link_train(struct analogix_dp_device *dp,
- u32 count, u32 bwtype)
+static int analogix_dp_fast_link_train(struct analogix_dp_device *dp)
{
- int i;
- int retval;
+ int i, ret;
+ u8 link_align, link_status[2];
+ enum pll_status status;
- for (i = 0; i < DP_TIMEOUT_LOOP_COUNT; i++) {
- analogix_dp_init_training(dp, count, bwtype);
- retval = analogix_dp_sw_link_training(dp);
- if (retval == 0)
- break;
+ analogix_dp_reset_macro(dp);
+
+ analogix_dp_set_link_bandwidth(dp, dp->link_train.link_rate);
+ analogix_dp_set_lane_count(dp, dp->link_train.lane_count);
- usleep_range(100, 110);
+ for (i = 0; i < dp->link_train.lane_count; i++) {
+ analogix_dp_set_lane_link_training(dp,
+ dp->link_train.training_lane[i], i);
}
- return retval;
+ ret = readx_poll_timeout(analogix_dp_get_pll_lock_status, dp, status,
+ status != PLL_UNLOCKED, 120,
+ 120 * DP_TIMEOUT_LOOP_COUNT);
+ if (ret) {
+ DRM_DEV_ERROR(dp->dev, "Wait for pll lock failed %d\n", ret);
+ return ret;
+ }
+
+ /* source Set training pattern 1 */
+ analogix_dp_set_training_pattern(dp, TRAINING_PTN1);
+ /* From DP spec, pattern must be on-screen for a minimum of 500us */
+ usleep_range(500, 600);
+
+ analogix_dp_set_training_pattern(dp, TRAINING_PTN2);
+ /* From DP spec, pattern must be on-screen for a minimum of 500us */
+ usleep_range(500, 600);
+
+ /* TODO: enhanced_mode? */
+ analogix_dp_set_training_pattern(dp, DP_NONE);
+
+ /*
+ * Useful for debugging issues with fast link training, disable for more
+ * speed
+ */
+ if (verify_fast_training) {
+ ret = drm_dp_dpcd_readb(&dp->aux, DP_LANE_ALIGN_STATUS_UPDATED,
+ &link_align);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dp->dev, "Read align status failed %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = drm_dp_dpcd_read(&dp->aux, DP_LANE0_1_STATUS, link_status,
+ 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dp->dev, "Read link status failed %d\n",
+ ret);
+ return ret;
+ }
+
+ if (analogix_dp_clock_recovery_ok(link_status,
+ dp->link_train.lane_count)) {
+ DRM_DEV_ERROR(dp->dev, "Clock recovery failed\n");
+ analogix_dp_reduce_link_rate(dp);
+ return -EIO;
+ }
+
+ if (analogix_dp_channel_eq_ok(link_status, link_align,
+ dp->link_train.lane_count)) {
+ DRM_DEV_ERROR(dp->dev, "Channel EQ failed\n");
+ analogix_dp_reduce_link_rate(dp);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int analogix_dp_train_link(struct analogix_dp_device *dp)
+{
+ if (dp->fast_train_support)
+ return analogix_dp_fast_link_train(dp);
+
+ return analogix_dp_full_link_train(dp, dp->video_info.max_lane_count,
+ dp->video_info.max_link_rate);
}
static int analogix_dp_config_video(struct analogix_dp_device *dp)
@@ -848,10 +926,10 @@ static void analogix_dp_commit(struct analogix_dp_device *dp)
DRM_ERROR("failed to disable the panel\n");
}
- ret = analogix_dp_set_link_train(dp, dp->video_info.max_lane_count,
- dp->video_info.max_link_rate);
+ ret = readx_poll_timeout(analogix_dp_train_link, dp, ret, !ret, 100,
+ DP_TIMEOUT_TRAINING_US * 5);
if (ret) {
- dev_err(dp->dev, "unable to do link train\n");
+ dev_err(dp->dev, "unable to do link train, ret=%d\n", ret);
return;
}
@@ -873,8 +951,8 @@ static void analogix_dp_commit(struct analogix_dp_device *dp)
/* Enable video */
analogix_dp_start_video(dp);
- dp->psr_support = analogix_dp_detect_sink_psr(dp);
- if (dp->psr_support)
+ dp->psr_enable = analogix_dp_detect_sink_psr(dp);
+ if (dp->psr_enable)
analogix_dp_enable_sink_psr(dp);
}
@@ -1119,6 +1197,7 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
if (ret)
DRM_ERROR("failed to setup the panel ret = %d\n", ret);
+ dp->psr_enable = false;
dp->dpms_mode = DRM_MODE_DPMS_OFF;
}
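
The core.c hunks above fold the old hand-rolled retry loop of analogix_dp_set_link_train() into a single readx_poll_timeout() call in analogix_dp_commit(). As a rough, illustrative sketch (using only names visible in these hunks; example_train_with_timeout() itself is made up), the <linux/iopoll.h> macro boils down to the loop below: keep invoking the training function, sleep between attempts, and give up once the 5 * DP_TIMEOUT_TRAINING_US budget is spent.

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ktime.h>

/* Illustrative expansion only, not part of the patch. */
static int example_train_with_timeout(struct analogix_dp_device *dp)
{
	ktime_t timeout = ktime_add_us(ktime_get(), DP_TIMEOUT_TRAINING_US * 5);
	int ret;

	for (;;) {
		ret = analogix_dp_train_link(dp);
		if (!ret)
			return 0;		/* training succeeded */
		if (ktime_after(ktime_get(), timeout))
			return -ETIMEDOUT;	/* retry budget exhausted */
		usleep_range(100, 150);		/* the 100 us sleep argument */
	}
}
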
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index 5c6a28806129..6a96ef7e6934 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -20,6 +20,10 @@
#define MAX_CR_LOOP 5
#define MAX_EQ_LOOP 5
+/* Training takes 22ms if AUX channel comm fails. Use this as retry interval */
+#define DP_TIMEOUT_TRAINING_US 22000
+#define DP_TIMEOUT_PSR_LOOP_MS 300
+
/* DP_MAX_LANE_COUNT */
#define DPCD_ENHANCED_FRAME_CAP(x) (((x) >> 7) & 0x1)
#define DPCD_MAX_LANE_COUNT(x) ((x) & 0x1f)
@@ -168,7 +172,8 @@ struct analogix_dp_device {
int dpms_mode;
int hpd_gpio;
bool force_hpd;
- bool psr_support;
+ bool psr_enable;
+ bool fast_train_support;
struct mutex panel_lock;
bool panel_is_modeset;
@@ -247,8 +252,8 @@ void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp);
void analogix_dp_enable_scrambling(struct analogix_dp_device *dp);
void analogix_dp_disable_scrambling(struct analogix_dp_device *dp);
void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp);
-void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
- struct edp_vsc_psr *vsc);
+int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
+ struct edp_vsc_psr *vsc, bool blocking);
ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
struct drm_dp_aux_msg *msg);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 303083ad28e3..9df2f3ef000c 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -10,10 +10,11 @@
* option) any later version.
*/
-#include <linux/device.h>
-#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
#include <drm/bridge/analogix_dp.h>
@@ -992,10 +993,25 @@ void analogix_dp_enable_psr_crc(struct analogix_dp_device *dp)
writel(PSR_VID_CRC_ENABLE, dp->reg_base + ANALOGIX_DP_CRC_CON);
}
-void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
- struct edp_vsc_psr *vsc)
+static ssize_t analogix_dp_get_psr_status(struct analogix_dp_device *dp)
+{
+ ssize_t val;
+ u8 status;
+
+ val = drm_dp_dpcd_readb(&dp->aux, DP_PSR_STATUS, &status);
+ if (val < 0) {
+ dev_err(dp->dev, "PSR_STATUS read failed ret=%zd", val);
+ return val;
+ }
+ return status;
+}
+
+int analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
+ struct edp_vsc_psr *vsc, bool blocking)
{
unsigned int val;
+ int ret;
+ ssize_t psr_status;
/* don't send info frame */
val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
@@ -1036,6 +1052,20 @@ void analogix_dp_send_psr_spd(struct analogix_dp_device *dp,
val = readl(dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
val |= IF_EN;
writel(val, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL);
+
+ if (!blocking)
+ return 0;
+
+ ret = readx_poll_timeout(analogix_dp_get_psr_status, dp, psr_status,
+ psr_status >= 0 &&
+ ((vsc->DB1 && psr_status == DP_PSR_SINK_ACTIVE_RFB) ||
+ (!vsc->DB1 && psr_status == DP_PSR_SINK_INACTIVE)), 1500,
+ DP_TIMEOUT_PSR_LOOP_MS * 1000);
+ if (ret) {
+ dev_warn(dp->dev, "Failed to apply PSR %d\n", ret);
+ return ret;
+ }
+ return 0;
}
ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 53ebbe2904b6..ec8d0006ef7c 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -147,7 +147,6 @@ struct dw_hdmi {
int vic;
u8 edid[HDMI_EDID_LEN];
- bool cable_plugin;
struct {
const struct dw_hdmi_phy_ops *ops;
@@ -1679,12 +1678,6 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
hdmi_writeb(hdmi, val, HDMI_FC_INVIDCONF);
}
-static void hdmi_enable_overflow_interrupts(struct dw_hdmi *hdmi)
-{
- hdmi_writeb(hdmi, 0, HDMI_FC_MASK2);
- hdmi_writeb(hdmi, 0, HDMI_IH_MUTE_FC_STAT2);
-}
-
static void hdmi_disable_overflow_interrupts(struct dw_hdmi *hdmi)
{
hdmi_writeb(hdmi, HDMI_IH_MUTE_FC_STAT2_OVERFLOW_MASK,
@@ -1774,8 +1767,6 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
hdmi_tx_hdcp_config(hdmi);
dw_hdmi_clear_overflow(hdmi);
- if (hdmi->cable_plugin && hdmi->sink_is_hdmi)
- hdmi_enable_overflow_interrupts(hdmi);
return 0;
}
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 34b7d420e555..7d25c42f22db 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -391,8 +391,7 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
if (blob) {
if (blob->length != sizeof(struct drm_mode_modeinfo) ||
drm_mode_convert_umode(state->crtc->dev, &state->mode,
- (const struct drm_mode_modeinfo *)
- blob->data))
+ blob->data))
return -EINVAL;
state->mode_blob = drm_property_blob_get(blob);
@@ -409,11 +408,36 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
}
EXPORT_SYMBOL(drm_atomic_set_mode_prop_for_crtc);
+/**
+ * drm_atomic_replace_property_blob_from_id - lookup the new blob and replace the old one with it
+ * @dev: DRM device
+ * @blob: a pointer to the member blob to be replaced
+ * @blob_id: ID of the new blob
+ * @expected_size: total expected size of the blob data (in bytes)
+ * @expected_elem_size: expected element size of the blob data (in bytes)
+ * @replaced: did the blob get replaced?
+ *
+ * Replace @blob with another blob with the ID @blob_id. If @blob_id is zero,
+ * @blob becomes NULL.
+ *
+ * If @expected_size is positive, the new blob length must equal
+ * @expected_size bytes. If @expected_elem_size is positive, the new blob
+ * length must be a multiple of @expected_elem_size bytes. If either
+ * expectation is not met, an error is returned.
+ *
+ * @replaced will indicate to the caller whether the blob was replaced or not.
+ * If the old and new blobs were in fact the same blob, @replaced will be
+ * false; otherwise it will be true.
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
static int
drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
struct drm_property_blob **blob,
uint64_t blob_id,
ssize_t expected_size,
+ ssize_t expected_elem_size,
bool *replaced)
{
struct drm_property_blob *new_blob = NULL;
@@ -423,7 +447,13 @@ drm_atomic_replace_property_blob_from_id(struct drm_device *dev,
if (new_blob == NULL)
return -EINVAL;
- if (expected_size > 0 && expected_size != new_blob->length) {
+ if (expected_size > 0 &&
+ new_blob->length != expected_size) {
+ drm_property_blob_put(new_blob);
+ return -EINVAL;
+ }
+ if (expected_elem_size > 0 &&
+ new_blob->length % expected_elem_size != 0) {
drm_property_blob_put(new_blob);
return -EINVAL;
}
@@ -471,7 +501,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
ret = drm_atomic_replace_property_blob_from_id(dev,
&state->degamma_lut,
val,
- -1,
+ -1, sizeof(struct drm_color_lut),
&replaced);
state->color_mgmt_changed |= replaced;
return ret;
@@ -479,7 +509,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
ret = drm_atomic_replace_property_blob_from_id(dev,
&state->ctm,
val,
- sizeof(struct drm_color_ctm),
+ sizeof(struct drm_color_ctm), -1,
&replaced);
state->color_mgmt_changed |= replaced;
return ret;
@@ -487,7 +517,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
ret = drm_atomic_replace_property_blob_from_id(dev,
&state->gamma_lut,
val,
- -1,
+ -1, sizeof(struct drm_color_lut),
&replaced);
state->color_mgmt_changed |= replaced;
return ret;
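
A minimal sketch of the two size checks added to drm_atomic_replace_property_blob_from_id() above: the CTM property passes a fixed expected_size of sizeof(struct drm_color_ctm), while the gamma/degamma LUTs pass -1 for the total size and sizeof(struct drm_color_lut) as the element size, so any whole number of LUT entries is accepted. example_blob_size_ok() below is a hypothetical standalone helper, not part of the patch.

#include <stdbool.h>
#include <stddef.h>
#include <sys/types.h>

static bool example_blob_size_ok(size_t length, ssize_t expected_size,
				 ssize_t expected_elem_size)
{
	if (expected_size > 0 && length != (size_t)expected_size)
		return false;	/* fixed-size blob has the wrong length */
	if (expected_elem_size > 0 && length % expected_elem_size != 0)
		return false;	/* not a whole number of elements */
	return true;
}

/*
 * e.g. a 256-entry gamma LUT (8-byte struct drm_color_lut entries):
 *   example_blob_size_ok(256 * 8, -1, 8)  -> true
 * a CTM blob one byte too long (struct drm_color_ctm is 72 bytes):
 *   example_blob_size_ok(73, 72, -1)      -> false
 */
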
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 00c78c1c9681..c35654591c12 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3818,7 +3818,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
}
/* Prepare GAMMA_LUT with the legacy values. */
- blob_data = (struct drm_color_lut *) blob->data;
+ blob_data = blob->data;
for (i = 0; i < size; i++) {
blob_data[i].red = red[i];
blob_data[i].green = green[i];
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 1ee84dd802d4..ba8cfe65c65b 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -129,10 +129,10 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
* type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
* applicable and if supported by the kernel.
*/
-static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
+static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags,
- struct drm_map_list ** maplist)
+ struct drm_map_list **maplist)
{
struct drm_local_map *map;
struct drm_map_list *list;
@@ -224,7 +224,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
case _DRM_SHM:
list = drm_find_matching_map(dev, map);
if (list != NULL) {
- if(list->map->size != map->size) {
+ if (list->map->size != map->size) {
DRM_DEBUG("Matching maps of type %d with "
"mismatched sizes, (%ld vs %ld)\n",
map->type, map->size, list->map->size);
@@ -361,7 +361,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
return 0;
}
-int drm_legacy_addmap(struct drm_device * dev, resource_size_t offset,
+int drm_legacy_addmap(struct drm_device *dev, resource_size_t offset,
unsigned int size, enum drm_map_type type,
enum drm_map_flags flags, struct drm_local_map **map_ptr)
{
@@ -637,8 +637,8 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
*
* Frees any pages and buffers associated with the given entry.
*/
-static void drm_cleanup_buf_error(struct drm_device * dev,
- struct drm_buf_entry * entry)
+static void drm_cleanup_buf_error(struct drm_device *dev,
+ struct drm_buf_entry *entry)
{
int i;
@@ -1446,8 +1446,8 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
int __drm_legacy_mapbufs(struct drm_device *dev, void *data, int *p,
void __user **v,
int (*f)(void *, int, unsigned long,
- struct drm_buf *),
- struct drm_file *file_priv)
+ struct drm_buf *),
+ struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int retcode = 0;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a797bbf1cab8..49147b2aa288 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1554,8 +1554,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
struct edid *override = NULL;
if (connector->override_edid)
- override = drm_edid_duplicate((const struct edid *)
- connector->edid_blob_ptr->data);
+ override = drm_edid_duplicate(connector->edid_blob_ptr->data);
if (!override)
override = drm_load_edid_firmware(connector);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 035784ddd133..0646b108030b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1351,7 +1351,7 @@ static struct drm_property_blob *setcmap_new_gamma_lut(struct drm_crtc *crtc,
if (IS_ERR(gamma_lut))
return gamma_lut;
- lut = (struct drm_color_lut *)gamma_lut->data;
+ lut = gamma_lut->data;
if (cmap->start || cmap->len != size) {
u16 *r = crtc->gamma_store;
u16 *g = r + crtc->gamma_size;
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 5a13ff29f4f0..0eebe8ba8a2c 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -158,9 +158,10 @@ static int framebuffer_check(struct drm_device *dev,
info = __drm_format_info(r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN);
if (!info) {
struct drm_format_name_buf format_name;
+
DRM_DEBUG_KMS("bad framebuffer format %s\n",
- drm_get_format_name(r->pixel_format,
- &format_name));
+ drm_get_format_name(r->pixel_format,
+ &format_name));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 5a8033fda4e3..f6b7c0e36a1a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -773,24 +773,23 @@ EXPORT_SYMBOL(drm_mode_hsync);
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
int refresh = 0;
- unsigned int calc_val;
if (mode->vrefresh > 0)
refresh = mode->vrefresh;
else if (mode->htotal > 0 && mode->vtotal > 0) {
- int vtotal;
- vtotal = mode->vtotal;
- /* work out vrefresh the value will be x1000 */
- calc_val = (mode->clock * 1000);
- calc_val /= mode->htotal;
- refresh = (calc_val + vtotal / 2) / vtotal;
+ unsigned int num, den;
+
+ num = mode->clock * 1000;
+ den = mode->htotal * mode->vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- refresh *= 2;
+ num *= 2;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- refresh /= 2;
+ den *= 2;
if (mode->vscan > 1)
- refresh /= mode->vscan;
+ den *= mode->vscan;
+
+ refresh = DIV_ROUND_CLOSEST(num, den);
}
return refresh;
}
@@ -1596,12 +1595,8 @@ int drm_mode_convert_umode(struct drm_device *dev,
struct drm_display_mode *out,
const struct drm_mode_modeinfo *in)
{
- int ret = -EINVAL;
-
- if (in->clock > INT_MAX || in->vrefresh > INT_MAX) {
- ret = -ERANGE;
- goto out;
- }
+ if (in->clock > INT_MAX || in->vrefresh > INT_MAX)
+ return -ERANGE;
out->clock = in->clock;
out->hdisplay = in->hdisplay;
@@ -1622,14 +1617,11 @@ int drm_mode_convert_umode(struct drm_device *dev,
out->status = drm_mode_validate_driver(dev, out);
if (out->status != MODE_OK)
- goto out;
+ return -EINVAL;
drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
- ret = 0;
-
-out:
- return ret;
+ return 0;
}
/**
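
The rewritten drm_mode_vrefresh() above computes refresh = round(clock * 1000 / (htotal * vtotal)), doubling the numerator for interlace and scaling the denominator for doublescan and vscan. A standalone check of the arithmetic, using an assumed common 1920x1080 timing (clock 148500 kHz, htotal 2200, vtotal 1125):

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int num = 148500 * 1000;	/* mode->clock is in kHz */
	unsigned int den = 2200 * 1125;		/* htotal * vtotal */

	/* no interlace/doublescan/vscan adjustment for this mode */
	printf("vrefresh = %u Hz\n", DIV_ROUND_CLOSEST(num, den));	/* 60 */
	return 0;
}
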
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index a5d1fc7e8a37..6d2a6e428a3e 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -104,7 +104,7 @@ static int create_in_format_blob(struct drm_device *dev, struct drm_plane *plane
if (IS_ERR(blob))
return -1;
- blob_data = (struct drm_format_modifier_blob *)blob->data;
+ blob_data = blob->data;
blob_data->version = FORMAT_BLOB_CURRENT;
blob_data->count_formats = plane->format_count;
blob_data->formats_offset = sizeof(struct drm_format_modifier_blob);
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 781518fd88e3..b25f98f33f6c 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -63,16 +63,34 @@ void drm_printf(struct drm_printer *p, const char *f, ...)
}
EXPORT_SYMBOL(drm_printf);
-#define DRM_PRINTK_FMT "[" DRM_NAME ":%s]%s %pV"
-
void drm_dev_printk(const struct device *dev, const char *level,
- unsigned int category, const char *function_name,
- const char *prefix, const char *format, ...)
+ const char *format, ...)
{
struct va_format vaf;
va_list args;
- if (category != DRM_UT_NONE && !(drm_debug & category))
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ if (dev)
+ dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
+ else
+ printk("%s" "[" DRM_NAME ":%ps] %pV",
+ level, __builtin_return_address(0), &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(drm_dev_printk);
+
+void drm_dev_dbg(const struct device *dev, unsigned int category,
+ const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (!(drm_debug & category))
return;
va_start(args, format);
@@ -80,32 +98,47 @@ void drm_dev_printk(const struct device *dev, const char *level,
vaf.va = &args;
if (dev)
- dev_printk(level, dev, DRM_PRINTK_FMT, function_name, prefix,
- &vaf);
+ dev_printk(KERN_DEBUG, dev, "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
else
- printk("%s" DRM_PRINTK_FMT, level, function_name, prefix, &vaf);
+ printk(KERN_DEBUG "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
va_end(args);
}
-EXPORT_SYMBOL(drm_dev_printk);
+EXPORT_SYMBOL(drm_dev_dbg);
-void drm_printk(const char *level, unsigned int category,
- const char *format, ...)
+void drm_dbg(unsigned int category, const char *format, ...)
{
struct va_format vaf;
va_list args;
- if (category != DRM_UT_NONE && !(drm_debug & category))
+ if (!(drm_debug & category))
return;
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
- printk("%s" "[" DRM_NAME ":%ps]%s %pV",
- level, __builtin_return_address(0),
- strcmp(level, KERN_ERR) == 0 ? " *ERROR*" : "", &vaf);
+ printk(KERN_DEBUG "[" DRM_NAME ":%ps] %pV",
+ __builtin_return_address(0), &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(drm_dbg);
+
+void drm_err(const char *format, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ printk(KERN_ERR "[" DRM_NAME ":%ps] *ERROR* %pV",
+ __builtin_return_address(0), &vaf);
va_end(args);
}
-EXPORT_SYMBOL(drm_printk);
+EXPORT_SYMBOL(drm_err);
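
With drm_dev_dbg(), drm_dbg() and drm_err() above resolving the caller through %ps and __builtin_return_address(0), the printing macros no longer need to pass __func__ and a prefix string down to the helpers. The wrappers below are hypothetical stand-ins for the real macros in include/drm/drm_print.h (not part of this hunk), shown only to illustrate the new calling convention; the declarations are assumed to live in that header.

#include <drm/drm_print.h>

#define EXAMPLE_DRM_DEV_DEBUG_KMS(dev, fmt, ...) \
	drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)

#define EXAMPLE_DRM_DEBUG_ATOMIC(fmt, ...) \
	drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)

#define EXAMPLE_DRM_ERROR(fmt, ...) \
	drm_err(fmt, ##__VA_ARGS__)
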
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 6ac6ee41a6a3..8f4672daac7f 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -567,6 +567,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
/* This must be explicitly initialised, so we can safely call list_del
* on it in the removal handler, even if it isn't in a file list. */
INIT_LIST_HEAD(&blob->head_file);
+ blob->data = (void *)blob + sizeof(*blob);
blob->length = length;
blob->dev = dev;
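
The new blob->data assignment above works because drm_property_create_blob() allocates the header and the payload in one chunk, with the data starting immediately after the struct. A minimal userspace sketch of that layout (struct example_blob and example_blob_create() are made up for illustration):

#include <stdlib.h>
#include <string.h>

struct example_blob {
	size_t length;
	void *data;	/* points into the same allocation */
};

static struct example_blob *example_blob_create(const void *src, size_t length)
{
	struct example_blob *blob = calloc(1, sizeof(*blob) + length);

	if (!blob)
		return NULL;
	blob->data = blob + 1;	/* payload starts right after the header */
	blob->length = length;
	if (src)
		memcpy(blob->data, src, length);
	return blob;
}
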
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index 3f58b4077767..e5bfeca361bd 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -11,6 +11,7 @@ config DRM_ETNAVIV
select WANT_DEV_COREDUMP
select CMA if HAVE_DMA_CONTIGUOUS
select DMA_CMA if HAVE_DMA_CONTIGUOUS
+ select DRM_SCHED
help
DRM driver for Vivante GPUs.
diff --git a/drivers/gpu/drm/etnaviv/Makefile b/drivers/gpu/drm/etnaviv/Makefile
index 1281c8d4fae5..46e5ffad69a6 100644
--- a/drivers/gpu/drm/etnaviv/Makefile
+++ b/drivers/gpu/drm/etnaviv/Makefile
@@ -9,9 +9,11 @@ etnaviv-y := \
etnaviv_gem_submit.o \
etnaviv_gem.o \
etnaviv_gpu.o \
+ etnaviv_hwdb.o \
etnaviv_iommu_v2.o \
etnaviv_iommu.o \
etnaviv_mmu.o \
- etnaviv_perfmon.o
+ etnaviv_perfmon.o \
+ etnaviv_sched.o
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o
diff --git a/drivers/gpu/drm/etnaviv/common.xml.h b/drivers/gpu/drm/etnaviv/common.xml.h
index 207f45c999c3..001faea80fef 100644
--- a/drivers/gpu/drm/etnaviv/common.xml.h
+++ b/drivers/gpu/drm/etnaviv/common.xml.h
@@ -8,15 +8,12 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- state.xml ( 19930 bytes, from 2017-03-09 15:43:43)
-- common.xml ( 23473 bytes, from 2017-03-09 15:43:43)
-- state_hi.xml ( 26403 bytes, from 2017-03-09 15:43:43)
-- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
-- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56)
-- state_3d.xml ( 66957 bytes, from 2017-03-09 15:43:43)
-- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56)
+- texdesc_3d.xml ( 3183 bytes, from 2017-12-18 16:51:59)
+- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
+- common.xml ( 35468 bytes, from 2018-01-22 13:48:54)
+- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59)
-Copyright (C) 2012-2017 by the following authors:
+Copyright (C) 2012-2018 by the following authors:
- Wladimir J. van der Laan <laanwj@gmail.com>
- Christian Gmeiner <christian.gmeiner@gmail.com>
- Lucas Stach <l.stach@pengutronix.de>
@@ -49,12 +46,7 @@ DEALINGS IN THE SOFTWARE.
#define SYNC_RECIPIENT_RA 0x00000005
#define SYNC_RECIPIENT_PE 0x00000007
#define SYNC_RECIPIENT_DE 0x0000000b
-#define SYNC_RECIPIENT_VG 0x0000000f
-#define SYNC_RECIPIENT_TESSELATOR 0x00000010
-#define SYNC_RECIPIENT_VG2 0x00000011
-#define SYNC_RECIPIENT_TESSELATOR2 0x00000012
-#define SYNC_RECIPIENT_VG3 0x00000013
-#define SYNC_RECIPIENT_TESSELATOR3 0x00000014
+#define SYNC_RECIPIENT_BLT 0x00000010
#define ENDIAN_MODE_NO_SWAP 0x00000000
#define ENDIAN_MODE_SWAP_16 0x00000001
#define ENDIAN_MODE_SWAP_32 0x00000002
@@ -77,6 +69,7 @@ DEALINGS IN THE SOFTWARE.
#define chipModel_GC800 0x00000800
#define chipModel_GC860 0x00000860
#define chipModel_GC880 0x00000880
+#define chipModel_GC900 0x00000900
#define chipModel_GC1000 0x00001000
#define chipModel_GC1500 0x00001500
#define chipModel_GC2000 0x00002000
@@ -88,6 +81,12 @@ DEALINGS IN THE SOFTWARE.
#define chipModel_GC5000 0x00005000
#define chipModel_GC5200 0x00005200
#define chipModel_GC6400 0x00006400
+#define chipModel_GC7000 0x00007000
+#define chipModel_GC7400 0x00007400
+#define chipModel_GC8000 0x00008000
+#define chipModel_GC8100 0x00008100
+#define chipModel_GC8200 0x00008200
+#define chipModel_GC8400 0x00008400
#define RGBA_BITS_R 0x00000001
#define RGBA_BITS_G 0x00000002
#define RGBA_BITS_B 0x00000004
@@ -203,7 +202,7 @@ DEALINGS IN THE SOFTWARE.
#define chipMinorFeatures2_RGB888 0x00001000
#define chipMinorFeatures2_TX__YUV_ASSEMBLER 0x00002000
#define chipMinorFeatures2_DYNAMIC_FREQUENCY_SCALING 0x00004000
-#define chipMinorFeatures2_EXTRA_TEXTURE_STATE 0x00008000
+#define chipMinorFeatures2_TX_FILTER 0x00008000
#define chipMinorFeatures2_FULL_DIRECTFB 0x00010000
#define chipMinorFeatures2_2D_TILING 0x00020000
#define chipMinorFeatures2_THREAD_WALKER_IN_PS 0x00040000
@@ -242,36 +241,36 @@ DEALINGS IN THE SOFTWARE.
#define chipMinorFeatures3_TX_ENHANCEMENTS1 0x00080000
#define chipMinorFeatures3_SH_ENHANCEMENTS1 0x00100000
#define chipMinorFeatures3_SH_ENHANCEMENTS2 0x00200000
-#define chipMinorFeatures3_UNK22 0x00400000
+#define chipMinorFeatures3_PE_ENHANCEMENTS1 0x00400000
#define chipMinorFeatures3_2D_FC_SOURCE 0x00800000
-#define chipMinorFeatures3_UNK24 0x01000000
-#define chipMinorFeatures3_UNK25 0x02000000
+#define chipMinorFeatures3_BUG_FIXES_14 0x01000000
+#define chipMinorFeatures3_POWER_OPTIMIZATIONS_0 0x02000000
#define chipMinorFeatures3_NEW_HZ 0x04000000
-#define chipMinorFeatures3_UNK27 0x08000000
-#define chipMinorFeatures3_UNK28 0x10000000
+#define chipMinorFeatures3_PE_DITHER_FIX 0x08000000
+#define chipMinorFeatures3_DE_ENHANCEMENTS3 0x10000000
#define chipMinorFeatures3_SH_ENHANCEMENTS3 0x20000000
-#define chipMinorFeatures3_UNK30 0x40000000
-#define chipMinorFeatures3_UNK31 0x80000000
-#define chipMinorFeatures4_UNK0 0x00000001
+#define chipMinorFeatures3_SH_ENHANCEMENTS4 0x40000000
+#define chipMinorFeatures3_TX_ENHANCEMENTS2 0x80000000
+#define chipMinorFeatures4_FE_ENHANCEMENTS1 0x00000001
#define chipMinorFeatures4_PE_ENHANCEMENTS2 0x00000002
#define chipMinorFeatures4_FRUSTUM_CLIP_FIX 0x00000004
-#define chipMinorFeatures4_UNK3 0x00000008
-#define chipMinorFeatures4_UNK4 0x00000010
+#define chipMinorFeatures4_DE_NO_GAMMA 0x00000008
+#define chipMinorFeatures4_PA_ENHANCEMENTS_2 0x00000010
#define chipMinorFeatures4_2D_GAMMA 0x00000020
#define chipMinorFeatures4_SINGLE_BUFFER 0x00000040
-#define chipMinorFeatures4_UNK7 0x00000080
-#define chipMinorFeatures4_UNK8 0x00000100
-#define chipMinorFeatures4_UNK9 0x00000200
-#define chipMinorFeatures4_UNK10 0x00000400
+#define chipMinorFeatures4_HI_ENHANCEMENTS_1 0x00000080
+#define chipMinorFeatures4_TX_ENHANCEMENTS_3 0x00000100
+#define chipMinorFeatures4_SH_ENHANCEMENTS_5 0x00000200
+#define chipMinorFeatures4_FE_ENHANCEMENTS_2 0x00000400
#define chipMinorFeatures4_TX_LERP_PRECISION_FIX 0x00000800
#define chipMinorFeatures4_2D_COLOR_SPACE_CONVERSION 0x00001000
#define chipMinorFeatures4_TEXTURE_ASTC 0x00002000
-#define chipMinorFeatures4_UNK14 0x00004000
-#define chipMinorFeatures4_UNK15 0x00008000
+#define chipMinorFeatures4_PE_ENHANCEMENTS_4 0x00004000
+#define chipMinorFeatures4_MC_ENHANCEMENTS_1 0x00008000
#define chipMinorFeatures4_HALTI2 0x00010000
-#define chipMinorFeatures4_UNK17 0x00020000
+#define chipMinorFeatures4_2D_MIRROR_EXTENSION 0x00020000
#define chipMinorFeatures4_SMALL_MSAA 0x00040000
-#define chipMinorFeatures4_UNK19 0x00080000
+#define chipMinorFeatures4_BUG_FIXES_17 0x00080000
#define chipMinorFeatures4_NEW_RA 0x00100000
#define chipMinorFeatures4_2D_OPF_YUV_OUTPUT 0x00200000
#define chipMinorFeatures4_2D_MULTI_SOURCE_BLT_EX2 0x00400000
@@ -280,41 +279,207 @@ DEALINGS IN THE SOFTWARE.
#define chipMinorFeatures4_BUG_FIXES18 0x02000000
#define chipMinorFeatures4_2D_COMPRESSION 0x04000000
#define chipMinorFeatures4_PROBE 0x08000000
-#define chipMinorFeatures4_UNK28 0x10000000
+#define chipMinorFeatures4_MEDIUM_PRECISION 0x10000000
#define chipMinorFeatures4_2D_SUPER_TILE_VERSION 0x20000000
-#define chipMinorFeatures4_UNK30 0x40000000
-#define chipMinorFeatures4_UNK31 0x80000000
-#define chipMinorFeatures5_UNK0 0x00000001
-#define chipMinorFeatures5_UNK1 0x00000002
-#define chipMinorFeatures5_UNK2 0x00000004
-#define chipMinorFeatures5_UNK3 0x00000008
+#define chipMinorFeatures4_BUG_FIXES19 0x40000000
+#define chipMinorFeatures4_SH_ENHANCEMENTS6 0x80000000
+#define chipMinorFeatures5_SH_ENHANCEMENTS7 0x00000001
+#define chipMinorFeatures5_BUG_FIXES20 0x00000002
+#define chipMinorFeatures5_DE_ADDRESS_40 0x00000004
+#define chipMinorFeatures5_MINI_MMU_FIX 0x00000008
#define chipMinorFeatures5_EEZ 0x00000010
-#define chipMinorFeatures5_UNK5 0x00000020
-#define chipMinorFeatures5_UNK6 0x00000040
-#define chipMinorFeatures5_UNK7 0x00000080
-#define chipMinorFeatures5_UNK8 0x00000100
+#define chipMinorFeatures5_BUG_FIXES21 0x00000020
+#define chipMinorFeatures5_EXTRA_VG_CAPS 0x00000040
+#define chipMinorFeatures5_MULTI_SRC_V15 0x00000080
+#define chipMinorFeatures5_BUG_FIXES22 0x00000100
#define chipMinorFeatures5_HALTI3 0x00000200
-#define chipMinorFeatures5_UNK10 0x00000400
+#define chipMinorFeatures5_TESSELATION_SHADERS 0x00000400
#define chipMinorFeatures5_2D_ONE_PASS_FILTER_TAP 0x00000800
-#define chipMinorFeatures5_UNK12 0x00001000
+#define chipMinorFeatures5_MULTI_SRC_V2_STR_QUAD 0x00001000
#define chipMinorFeatures5_SEPARATE_SRC_DST 0x00002000
#define chipMinorFeatures5_HALTI4 0x00004000
-#define chipMinorFeatures5_UNK15 0x00008000
+#define chipMinorFeatures5_RA_WRITE_DEPTH 0x00008000
#define chipMinorFeatures5_ANDROID_ONLY 0x00010000
#define chipMinorFeatures5_HAS_PRODUCTID 0x00020000
-#define chipMinorFeatures5_UNK18 0x00040000
-#define chipMinorFeatures5_UNK19 0x00080000
+#define chipMinorFeatures5_TX_SUPPORT_DEC 0x00040000
+#define chipMinorFeatures5_S8_MSAA_COMPRESSION 0x00080000
#define chipMinorFeatures5_PE_DITHER_FIX2 0x00100000
-#define chipMinorFeatures5_UNK21 0x00200000
-#define chipMinorFeatures5_UNK22 0x00400000
-#define chipMinorFeatures5_UNK23 0x00800000
-#define chipMinorFeatures5_UNK24 0x01000000
-#define chipMinorFeatures5_UNK25 0x02000000
-#define chipMinorFeatures5_UNK26 0x04000000
+#define chipMinorFeatures5_L2_CACHE_REMOVE 0x00200000
+#define chipMinorFeatures5_FE_ALLOW_RND_VTX_CNT 0x00400000
+#define chipMinorFeatures5_CUBE_MAP_FL28 0x00800000
+#define chipMinorFeatures5_TX_6BIT_FRAC 0x01000000
+#define chipMinorFeatures5_FE_ALLOW_STALL_PREFETCH_ENG 0x02000000
+#define chipMinorFeatures5_THIRD_PARTY_COMPRESSION 0x04000000
#define chipMinorFeatures5_RS_DEPTHSTENCIL_NATIVE_SUPPORT 0x08000000
#define chipMinorFeatures5_V2_MSAA_COMP_FIX 0x10000000
-#define chipMinorFeatures5_UNK29 0x20000000
-#define chipMinorFeatures5_UNK30 0x40000000
-#define chipMinorFeatures5_UNK31 0x80000000
+#define chipMinorFeatures5_HALTI5 0x20000000
+#define chipMinorFeatures5_EVIS 0x40000000
+#define chipMinorFeatures5_BLT_ENGINE 0x80000000
+#define chipMinorFeatures6_BUG_FIXES_23 0x00000001
+#define chipMinorFeatures6_BUG_FIXES_24 0x00000002
+#define chipMinorFeatures6_DEC 0x00000004
+#define chipMinorFeatures6_VS_TILE_NV12 0x00000008
+#define chipMinorFeatures6_VS_TILE_NV12_10BIT 0x00000010
+#define chipMinorFeatures6_RENDER_TARGET_8 0x00000020
+#define chipMinorFeatures6_TEX_LOD_FLOW_CORR 0x00000040
+#define chipMinorFeatures6_FACE_LOD 0x00000080
+#define chipMinorFeatures6_MULTI_CORE_SEMAPHORE_STALL_V2 0x00000100
+#define chipMinorFeatures6_VMSAA 0x00000200
+#define chipMinorFeatures6_CHIP_ENABLE_LINK 0x00000400
+#define chipMinorFeatures6_MULTI_SRC_BLT_1_5_ENHANCEMENT 0x00000800
+#define chipMinorFeatures6_MULTI_SRC_BLT_BILINEAR_FILTER 0x00001000
+#define chipMinorFeatures6_RA_HZEZ_CLOCK_CONTROL 0x00002000
+#define chipMinorFeatures6_CACHE128B256BPERLINE 0x00004000
+#define chipMinorFeatures6_V4_COMPRESSION 0x00008000
+#define chipMinorFeatures6_PE2D_MAJOR_SUPER_TILE 0x00010000
+#define chipMinorFeatures6_PE_32BPC_COLORMASK_FIX 0x00020000
+#define chipMinorFeatures6_ALPHA_BLENDING_OPT 0x00040000
+#define chipMinorFeatures6_NEW_GPIPE 0x00080000
+#define chipMinorFeatures6_PIPELINE_32_ATTRIBUTES 0x00100000
+#define chipMinorFeatures6_MSAA_SHADING 0x00200000
+#define chipMinorFeatures6_NO_ANISTRO_FILTER 0x00400000
+#define chipMinorFeatures6_NO_ASTC 0x00800000
+#define chipMinorFeatures6_NO_DXT 0x01000000
+#define chipMinorFeatures6_HWTFB 0x02000000
+#define chipMinorFeatures6_RA_DEPTH_WRITE_MSAA1X_FIX 0x04000000
+#define chipMinorFeatures6_EZHZ_CLOCKGATE_FIX 0x08000000
+#define chipMinorFeatures6_SH_SNAP2PAGE_FIX 0x10000000
+#define chipMinorFeatures6_SH_HALFDEPENDENCY_FIX 0x20000000
+#define chipMinorFeatures6_USC_MCFILL_FIX 0x40000000
+#define chipMinorFeatures6_TPG_TCPERF_FIX 0x80000000
+#define chipMinorFeatures7_USC_MDFIFO_OVERFLOW_FIX 0x00000001
+#define chipMinorFeatures7_SH_TEXLD_BARRIER_IN_CS_FIX 0x00000002
+#define chipMinorFeatures7_RS_NEW_BASEADDR 0x00000004
+#define chipMinorFeatures7_PE_8BPP_DUALPIPE_FIX 0x00000008
+#define chipMinorFeatures7_SH_ADVANCED_INSTR 0x00000010
+#define chipMinorFeatures7_SH_FLAT_INTERPOLATION_DUAL16_FIX 0x00000020
+#define chipMinorFeatures7_USC_CONTINUOUS_FLUS_FIX 0x00000040
+#define chipMinorFeatures7_SH_SUPPORT_V4 0x00000080
+#define chipMinorFeatures7_SH_SUPPORT_ALPHA_KILL 0x00000100
+#define chipMinorFeatures7_PE_NO_ALPHA_TEST 0x00000200
+#define chipMinorFeatures7_TX_LOD_NEAREST_SELECT 0x00000400
+#define chipMinorFeatures7_SH_FIX_LDEXP 0x00000800
+#define chipMinorFeatures7_SUPPORT_MOVAI 0x00001000
+#define chipMinorFeatures7_SH_SNAP2PAGE_MAXPAGES_FIX 0x00002000
+#define chipMinorFeatures7_PE_RGBA16I_FIX 0x00004000
+#define chipMinorFeatures7_BLT_8bpp_256TILE_FC_FIX 0x00008000
+#define chipMinorFeatures7_PE_64BIT_FENCE_FIX 0x00010000
+#define chipMinorFeatures7_USC_FULL_CACHE_FIX 0x00020000
+#define chipMinorFeatures7_TX_YUV_ASSEMBLER_10BIT 0x00040000
+#define chipMinorFeatures7_FE_32BIT_INDEX_FIX 0x00080000
+#define chipMinorFeatures7_BLT_64BPP_MASKED_CLEAR_FIX 0x00100000
+#define chipMinorFeatures7_BIT_SECURITY 0x00200000
+#define chipMinorFeatures7_BIT_ROBUSTNESS 0x00400000
+#define chipMinorFeatures7_USC_ATOMIC_FIX 0x00800000
+#define chipMinorFeatures7_SH_PSO_MSAA1x_FIX 0x01000000
+#define chipMinorFeatures7_BIT_USC_VX_PERF_FIX 0x02000000
+#define chipMinorFeatures7_EVIS_NO_ABSDIFF 0x04000000
+#define chipMinorFeatures7_EVIS_NO_BITREPLACE 0x08000000
+#define chipMinorFeatures7_EVIS_NO_BOXFILTER 0x10000000
+#define chipMinorFeatures7_EVIS_NO_CORDIAC 0x20000000
+#define chipMinorFeatures7_EVIS_NO_DP32 0x40000000
+#define chipMinorFeatures7_EVIS_NO_FILTER 0x80000000
+#define chipMinorFeatures8_EVIS_NO_IADD 0x00000001
+#define chipMinorFeatures8_EVIS_NO_SELECTADD 0x00000002
+#define chipMinorFeatures8_EVIS_LERP_7OUTPUT 0x00000004
+#define chipMinorFeatures8_EVIS_ACCSQ_8OUTPUT 0x00000008
+#define chipMinorFeatures8_USC_GOS_ADDR_FIX 0x00000010
+#define chipMinorFeatures8_TX_8BIT_UVFRAC 0x00000020
+#define chipMinorFeatures8_TX_DESC_CACHE_CLOCKGATE_FIX 0x00000040
+#define chipMinorFeatures8_RSBLT_MSAA_DECOMPRESSION 0x00000080
+#define chipMinorFeatures8_TX_INTEGER_COORDINATE 0x00000100
+#define chipMinorFeatures8_DRAWID 0x00000200
+#define chipMinorFeatures8_PSIO_SAMPLEMASK_IN_R0ZW_FIX 0x00000400
+#define chipMinorFeatures8_TX_INTEGER_COORDINATE_V2 0x00000800
+#define chipMinorFeatures8_MULTI_CORE_BLOCK_SET_CONFIG 0x00001000
+#define chipMinorFeatures8_VG_RESOLVE_ENGINE 0x00002000
+#define chipMinorFeatures8_VG_PE_COLOR_KEY 0x00004000
+#define chipMinorFeatures8_VG_IM_INDEX_FORMAT 0x00008000
+#define chipMinorFeatures8_SNAPPAGE_CMD 0x00010000
+#define chipMinorFeatures8_SH_NO_INDEX_CONST_ON_A0 0x00020000
+#define chipMinorFeatures8_SH_NO_ONECONST_LIMIT 0x00040000
+#define chipMinorFeatures8_SH_IMG_LDST_ON_TEMP 0x00080000
+#define chipMinorFeatures8_COMPUTE_ONLY 0x00100000
+#define chipMinorFeatures8_SH_IMG_LDST_CLAMP 0x00200000
+#define chipMinorFeatures8_SH_ICACHE_ALLOC_COUNT_FIX 0x00400000
+#define chipMinorFeatures8_SH_ICACHE_PREFETCH 0x00800000
+#define chipMinorFeatures8_PE2D_SEPARATE_CACHE 0x01000000
+#define chipMinorFeatures8_VG_AYUV_INPUT_OUTPUT 0x02000000
+#define chipMinorFeatures8_VG_DOUBLE_IMAGE 0x04000000
+#define chipMinorFeatures8_VG_RECTANGLE_STRIPE_MODE 0x08000000
+#define chipMinorFeatures8_VG_MMU 0x10000000
+#define chipMinorFeatures8_VG_IM_FILTER 0x20000000
+#define chipMinorFeatures8_VG_IM_YUV_PACKET 0x40000000
+#define chipMinorFeatures8_VG_IM_YUV_PLANAR 0x80000000
+#define chipMinorFeatures9_VG_PE_YUV_PACKET 0x00000001
+#define chipMinorFeatures9_VG_COLOR_PRECISION_8_BIT 0x00000002
+#define chipMinorFeatures9_PE_MSAA_OQ_FIX 0x00000004
+#define chipMinorFeatures9_PSIO_MSAA_CL_FIX 0x00000008
+#define chipMinorFeatures9_USC_DEFER_FILL_FIX 0x00000010
+#define chipMinorFeatures9_SH_CLOCK_GATE_FIX 0x00000020
+#define chipMinorFeatures9_FE_NEED_DUMMYDRAW 0x00000040
+#define chipMinorFeatures9_PE2D_LINEAR_YUV420_OUTPUT 0x00000080
+#define chipMinorFeatures9_PE2D_LINEAR_YUV420_10BIT 0x00000100
+#define chipMinorFeatures9_MULTI_CLUSTER 0x00000200
+#define chipMinorFeatures9_VG_TS_CULLING 0x00000400
+#define chipMinorFeatures9_VG_FP25 0x00000800
+#define chipMinorFeatures9_SH_MULTI_WG_PACK 0x00001000
+#define chipMinorFeatures9_SH_DUAL16_SAMPLEMASK_ZW 0x00002000
+#define chipMinorFeatures9_TPG_TRIVIAL_MODE_FIX 0x00004000
+#define chipMinorFeatures9_TX_ASTC_MULTISLICE_FIX 0x00008000
+#define chipMinorFeatures9_FE_ROBUST_FIX 0x00010000
+#define chipMinorFeatures9_SH_GPIPE_ACCESS_FULLTEMPS 0x00020000
+#define chipMinorFeatures9_PSIO_INTERLOCK 0x00040000
+#define chipMinorFeatures9_PA_WIDELINE_FIX 0x00080000
+#define chipMinorFeatures9_WIDELINE_HELPER_FIX 0x00100000
+#define chipMinorFeatures9_G2D_3RD_PARTY_COMPRESSION_1_1 0x00200000
+#define chipMinorFeatures9_TX_FLUSH_L1CACHE 0x00400000
+#define chipMinorFeatures9_PE_DITHER_FIX2 0x00800000
+#define chipMinorFeatures9_G2D_DEC400 0x01000000
+#define chipMinorFeatures9_SH_TEXLD_U_FIX 0x02000000
+#define chipMinorFeatures9_MC_FCCACHE_BYTEMASK 0x04000000
+#define chipMinorFeatures9_SH_MULTI_WG_PACK_FIX 0x08000000
+#define chipMinorFeatures9_DC_OVERLAY_SCALING 0x10000000
+#define chipMinorFeatures9_DC_SOURCE_ROTATION 0x20000000
+#define chipMinorFeatures9_DC_TILED 0x40000000
+#define chipMinorFeatures9_DC_YUV_L1 0x80000000
+#define chipMinorFeatures10_DC_D30_OUTPUT 0x00000001
+#define chipMinorFeatures10_DC_MMU 0x00000002
+#define chipMinorFeatures10_DC_COMPRESSION 0x00000004
+#define chipMinorFeatures10_DC_QOS 0x00000008
+#define chipMinorFeatures10_PE_ADVANCE_BLEND_PART0 0x00000010
+#define chipMinorFeatures10_FE_PATCHLIST_FETCH_FIX 0x00000020
+#define chipMinorFeatures10_RA_CG_FIX 0x00000040
+#define chipMinorFeatures10_EVIS_VX2 0x00000080
+#define chipMinorFeatures10_NN_FLOAT 0x00000100
+#define chipMinorFeatures10_DEC400 0x00000200
+#define chipMinorFeatures10_LS_SUPPORT_PERCOMP_DEPENDENCY 0x00000400
+#define chipMinorFeatures10_TP_ENGINE 0x00000800
+#define chipMinorFeatures10_MULTI_CORE_BLOCK_SET_CONFIG2 0x00001000
+#define chipMinorFeatures10_PE_VMSAA_COVERAGE_CACHE_FIX 0x00002000
+#define chipMinorFeatures10_SECURITY_AHB 0x00004000
+#define chipMinorFeatures10_MULTICORE_SEMAPHORESTALL_V3 0x00008000
+#define chipMinorFeatures10_SMALLBATCH 0x00010000
+#define chipMinorFeatures10_SH_CMPLX 0x00020000
+#define chipMinorFeatures10_SH_IDIV0_SWZL_EHS 0x00040000
+#define chipMinorFeatures10_TX_LERP_LESS_BIT 0x00080000
+#define chipMinorFeatures10_SH_GM_ENDIAN 0x00100000
+#define chipMinorFeatures10_SH_GM_USC_UNALLOC 0x00200000
+#define chipMinorFeatures10_SH_END_OF_BB 0x00400000
+#define chipMinorFeatures10_VIP_V7 0x00800000
+#define chipMinorFeatures10_TX_BORDER_CLAMP_FIX 0x01000000
+#define chipMinorFeatures10_SH_IMG_LD_LASTPIXEL_FIX 0x02000000
+#define chipMinorFeatures10_ASYNC_BLT 0x04000000
+#define chipMinorFeatures10_ASYNC_FE_FENCE_FIX 0x08000000
+#define chipMinorFeatures10_PSCS_THROTTLE 0x10000000
+#define chipMinorFeatures10_SEPARATE_LS 0x20000000
+#define chipMinorFeatures10_MCFE 0x40000000
+#define chipMinorFeatures10_WIDELINE_TRIANGLE_EMU 0x80000000
+#define chipMinorFeatures11_VG_RESOLUTION_8K 0x00000001
+#define chipMinorFeatures11_FENCE_32BIT 0x00000002
+#define chipMinorFeatures11_FENCE_64BIT 0x00000004
+#define chipMinorFeatures11_NN_INTERLEVE8 0x00000008
+#define chipMinorFeatures11_TP_REORDER 0x00000010
+#define chipMinorFeatures11_PE_DEPTH_ONLY_OQFIX 0x00000020
#endif /* COMMON_XML */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
index 99ad2f073c6e..bfc6d4aa3b7c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c
@@ -215,6 +215,24 @@ u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe
return buffer->user_size / 8;
}
+u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_cmdbuf *buffer = &gpu->buffer;
+
+ lockdep_assert_held(&gpu->lock);
+
+ buffer->user_size = 0;
+
+ CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
+ VIVS_MMUv2_PTA_CONFIG_INDEX(0));
+
+ CMD_END(buffer);
+
+ buffer->user_size = ALIGN(buffer->user_size, 8);
+
+ return buffer->user_size / 8;
+}
+
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 6faf4042db23..ab50090d066c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -101,12 +101,25 @@ static void load_gpu(struct drm_device *dev)
static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
+ struct etnaviv_drm_private *priv = dev->dev_private;
struct etnaviv_file_private *ctx;
+ int i;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ for (i = 0; i < ETNA_MAX_PIPES; i++) {
+ struct etnaviv_gpu *gpu = priv->gpu[i];
+
+ if (gpu) {
+ drm_sched_entity_init(&gpu->sched,
+ &ctx->sched_entity[i],
+ &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
+ 32, NULL);
+ }
+ }
+
file->driver_priv = ctx;
return 0;
@@ -126,6 +139,9 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
if (gpu->lastctx == ctx)
gpu->lastctx = NULL;
mutex_unlock(&gpu->lock);
+
+ drm_sched_entity_fini(&gpu->sched,
+ &ctx->sched_entity[i]);
}
}
@@ -637,25 +653,21 @@ static int compare_str(struct device *dev, void *data)
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *node = dev->of_node;
struct component_match *match = NULL;
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (node) {
+ if (!dev->platform_data) {
struct device_node *core_node;
- int i;
- for (i = 0; ; i++) {
- core_node = of_parse_phandle(node, "cores", i);
- if (!core_node)
- break;
+ for_each_compatible_node(core_node, NULL, "vivante,gc") {
+ if (!of_device_is_available(core_node))
+ continue;
drm_of_component_match_add(&pdev->dev, &match,
compare_of, core_node);
- of_node_put(core_node);
}
- } else if (dev->platform_data) {
+ } else {
char **names = dev->platform_data;
unsigned i;
@@ -673,25 +685,18 @@ static int etnaviv_pdev_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id dt_match[] = {
- { .compatible = "fsl,imx-gpu-subsystem" },
- { .compatible = "marvell,dove-gpu-subsystem" },
- {}
-};
-MODULE_DEVICE_TABLE(of, dt_match);
-
static struct platform_driver etnaviv_platform_driver = {
.probe = etnaviv_pdev_probe,
.remove = etnaviv_pdev_remove,
.driver = {
.name = "etnaviv",
- .of_match_table = dt_match,
},
};
static int __init etnaviv_init(void)
{
int ret;
+ struct device_node *np;
etnaviv_validate_init();
@@ -703,6 +708,19 @@ static int __init etnaviv_init(void)
if (ret != 0)
platform_driver_unregister(&etnaviv_gpu_driver);
+ /*
+ * If the DT contains at least one available GPU device, instantiate
+ * the DRM platform device.
+ */
+ for_each_compatible_node(np, NULL, "vivante,gc") {
+ if (!of_device_is_available(np))
+ continue;
+
+ platform_device_register_simple("etnaviv", -1, NULL, 0);
+ of_node_put(np);
+ break;
+ }
+
return ret;
}
module_init(etnaviv_init);
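
The etnaviv_drv.c changes above give every DRM file its own drm_sched_entity per pipe, created in open() and torn down in postclose(); jobs are later pushed through that entity from the submit ioctl. Condensed into one place, using only the scheduler calls visible in these hunks (the example_* helpers are made up), the per-file lifetime looks roughly like this:

#include <drm/gpu_scheduler.h>

static int example_entity_open(struct etnaviv_gpu *gpu,
			       struct drm_sched_entity *entity)
{
	/* NORMAL run-queue, up to 32 jobs queued in the entity at a time */
	return drm_sched_entity_init(&gpu->sched, entity,
				     &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
				     32, NULL);
}

static void example_entity_close(struct etnaviv_gpu *gpu,
				 struct drm_sched_entity *entity)
{
	/* tears down whatever is still queued on this entity */
	drm_sched_entity_fini(&gpu->sched, entity);
}
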
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index a54f0b758a5c..ddb17ee565e9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -34,6 +34,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/etnaviv_drm.h>
+#include <drm/gpu_scheduler.h>
struct etnaviv_cmdbuf;
struct etnaviv_gpu;
@@ -42,11 +43,11 @@ struct etnaviv_gem_object;
struct etnaviv_gem_submit;
struct etnaviv_file_private {
- /* currently we don't do anything useful with this.. but when
- * per-context address spaces are supported we'd keep track of
+ /*
+ * When per-context address spaces are supported we'd keep track of
* the context's page-tables here.
*/
- int dummy;
+ struct drm_sched_entity sched_entity[ETNA_MAX_PIPES];
};
struct etnaviv_drm_private {
@@ -85,6 +86,7 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
uintptr_t ptr, u32 size, u32 flags, u32 *handle);
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
+u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 6d0909c589d1..48aef6cf6a42 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -20,9 +20,13 @@
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
+#include "etnaviv_sched.h"
#include "state.xml.h"
#include "state_hi.xml.h"
+static bool etnaviv_dump_core = true;
+module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
+
struct core_dump_iterator {
void *start;
struct etnaviv_dump_object_header *hdr;
@@ -121,10 +125,16 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
struct etnaviv_vram_mapping *vram;
struct etnaviv_gem_object *obj;
struct etnaviv_gem_submit *submit;
+ struct drm_sched_job *s_job;
unsigned int n_obj, n_bomap_pages;
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
+ /* Only catch the first event, or when manually re-armed */
+ if (!etnaviv_dump_core)
+ return;
+ etnaviv_dump_core = false;
+
mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
/* We always dump registers, mmu, ring and end marker */
@@ -135,10 +145,13 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
mmu_size + gpu->buffer.size;
/* Add in the active command buffers */
- list_for_each_entry(submit, &gpu->active_submit_list, node) {
+ spin_lock(&gpu->sched.job_list_lock);
+ list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
+ submit = to_etnaviv_submit(s_job);
file_size += submit->cmdbuf.size;
n_obj++;
}
+ spin_unlock(&gpu->sched.job_list_lock);
/* Add in the active buffer objects */
list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
@@ -180,10 +193,14 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
gpu->buffer.size,
etnaviv_cmdbuf_get_va(&gpu->buffer));
- list_for_each_entry(submit, &gpu->active_submit_list, node)
+ spin_lock(&gpu->sched.job_list_lock);
+ list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
+ submit = to_etnaviv_submit(s_job);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
submit->cmdbuf.vaddr, submit->cmdbuf.size,
etnaviv_cmdbuf_get_va(&submit->cmdbuf));
+ }
+ spin_unlock(&gpu->sched.job_list_lock);
/* Reserve space for the bomap */
if (n_bomap_pages) {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index be72a9833f2b..93e696fcc14f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -94,6 +94,9 @@ struct etnaviv_gem_submit_bo {
u32 flags;
struct etnaviv_gem_object *obj;
struct etnaviv_vram_mapping *mapping;
+ struct dma_fence *excl;
+ unsigned int nr_shared;
+ struct dma_fence **shared;
};
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
@@ -101,9 +104,11 @@ struct etnaviv_gem_submit_bo {
* make it easier to unwind when things go wrong, etc).
*/
struct etnaviv_gem_submit {
+ struct drm_sched_job sched_job;
struct kref refcount;
struct etnaviv_gpu *gpu;
struct dma_fence *out_fence, *in_fence;
+ int out_fence_id;
struct list_head node; /* GPU active submit list */
struct etnaviv_cmdbuf cmdbuf;
bool runtime_resumed;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 1f8202bca061..46ecd3e66ac9 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -22,6 +22,7 @@
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_perfmon.h"
+#include "etnaviv_sched.h"
/*
* Cmdstream submission:
@@ -169,29 +170,33 @@ fail:
return ret;
}
-static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
+static int submit_fence_sync(struct etnaviv_gem_submit *submit)
{
- unsigned int context = submit->gpu->fence_context;
int i, ret = 0;
for (i = 0; i < submit->nr_bos; i++) {
- struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
- bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
- bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
+ struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
+ struct reservation_object *robj = bo->obj->resv;
- ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
- explicit);
- if (ret)
- break;
- }
+ if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
+ ret = reservation_object_reserve_shared(robj);
+ if (ret)
+ return ret;
+ }
+
+ if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
+ continue;
+
+ if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
+ ret = reservation_object_get_fences_rcu(robj, &bo->excl,
+ &bo->nr_shared,
+ &bo->shared);
+ if (ret)
+ return ret;
+ } else {
+ bo->excl = reservation_object_get_excl_rcu(robj);
+ }
- if (submit->flags & ETNA_SUBMIT_FENCE_FD_IN) {
- /*
- * Wait if the fence is from a foreign context, or if the fence
- * array contains any fence from a foreign context.
- */
- if (!dma_fence_match_context(submit->in_fence, context))
- ret = dma_fence_wait(submit->in_fence, true);
}
return ret;
@@ -381,8 +386,13 @@ static void submit_cleanup(struct kref *kref)
if (submit->in_fence)
dma_fence_put(submit->in_fence);
- if (submit->out_fence)
+ if (submit->out_fence) {
+ /* first remove from the IDR, so the fence cannot be found anymore */
+ mutex_lock(&submit->gpu->fence_idr_lock);
+ idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
+ mutex_unlock(&submit->gpu->fence_idr_lock);
dma_fence_put(submit->out_fence);
+ }
kfree(submit->pmrs);
kfree(submit);
}
@@ -395,6 +405,7 @@ void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct etnaviv_file_private *ctx = file->driver_priv;
struct etnaviv_drm_private *priv = dev->dev_private;
struct drm_etnaviv_gem_submit *args = data;
struct drm_etnaviv_gem_submit_reloc *relocs;
@@ -503,10 +514,6 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto err_submit_objects;
- ret = submit_lock_objects(submit, &ticket);
- if (ret)
- goto err_submit_objects;
-
if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
relocs, args->nr_relocs)) {
ret = -EINVAL;
@@ -521,10 +528,6 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
- ret = submit_fence_sync(submit);
- if (ret)
- goto err_submit_objects;
-
ret = submit_pin_objects(submit);
if (ret)
goto err_submit_objects;
@@ -539,9 +542,16 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
goto err_submit_objects;
memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
- submit->cmdbuf.user_size = ALIGN(args->stream_size, 8);
- ret = etnaviv_gpu_submit(gpu, submit);
+ ret = submit_lock_objects(submit, &ticket);
+ if (ret)
+ goto err_submit_objects;
+
+ ret = submit_fence_sync(submit);
+ if (ret)
+ goto err_submit_objects;
+
+ ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
if (ret)
goto err_submit_objects;
@@ -563,7 +573,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
}
args->fence_fd = out_fence_fd;
- args->fence = submit->out_fence->seqno;
+ args->fence = submit->out_fence_id;
err_submit_objects:
etnaviv_submit_put(submit);
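
submit_fence_sync() above no longer blocks in the ioctl: it only records the exclusive fence (or, for written BOs, all fences) from each reservation object in the submit. A hypothetical consumer, in the spirit of the scheduler dependency callback this series adds in etnaviv_sched.c (not shown in this hunk), would then hand those stored fences to the scheduler one at a time until none are left:

#include <linux/dma-fence.h>
#include "etnaviv_gem.h"	/* struct etnaviv_gem_submit_bo */

/* Illustrative only; example_next_dependency() is not part of the patch. */
static struct dma_fence *example_next_dependency(struct etnaviv_gem_submit_bo *bo)
{
	struct dma_fence *fence;

	if (bo->excl) {
		fence = bo->excl;
		bo->excl = NULL;
		return fence;
	}

	while (bo->nr_shared) {
		fence = bo->shared[--bo->nr_shared];
		if (!dma_fence_is_signaled(fence))
			return fence;
		dma_fence_put(fence);	/* already signalled, drop it */
	}

	return NULL;	/* no outstanding dependencies, job can run */
}
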
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 21d0d22f1168..8a88799bf79b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -26,19 +26,21 @@
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"
+#include "etnaviv_sched.h"
#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "cmdstream.xml.h"
+#ifndef PHYS_OFFSET
+#define PHYS_OFFSET 0
+#endif
+
static const struct platform_device_id gpu_ids[] = {
{ .name = "etnaviv-gpu,2d" },
{ },
};
-static bool etnaviv_dump_core = true;
-module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
-
/*
* Driver functions:
*/
@@ -82,6 +84,30 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
*value = gpu->identity.minor_features5;
break;
+ case ETNAVIV_PARAM_GPU_FEATURES_7:
+ *value = gpu->identity.minor_features6;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_8:
+ *value = gpu->identity.minor_features7;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_9:
+ *value = gpu->identity.minor_features8;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_10:
+ *value = gpu->identity.minor_features9;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_11:
+ *value = gpu->identity.minor_features10;
+ break;
+
+ case ETNAVIV_PARAM_GPU_FEATURES_12:
+ *value = gpu->identity.minor_features11;
+ break;
+
case ETNAVIV_PARAM_GPU_STREAM_COUNT:
*value = gpu->identity.stream_count;
break;
@@ -348,6 +374,13 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
dev_info(gpu->dev, "model: GC%x, revision: %x\n",
gpu->identity.model, gpu->identity.revision);
+ /*
+ * If there is a match in the HWDB, we aren't interested in the
+ * remaining register values, as they might be wrong.
+ */
+ if (etnaviv_fill_identity_from_hwdb(gpu))
+ return;
+
gpu->identity.features = gpu_read(gpu, VIVS_HI_CHIP_FEATURE);
/* Disable fast clear on GC700. */
@@ -448,9 +481,14 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
control |= VIVS_HI_CLOCK_CONTROL_ISOLATE_GPU;
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
- /* set soft reset. */
- control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
- gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+ if (gpu->sec_mode == ETNA_SEC_KERNEL) {
+ gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL,
+ VIVS_MMUv2_AHB_CONTROL_RESET);
+ } else {
+ /* set soft reset. */
+ control |= VIVS_HI_CLOCK_CONTROL_SOFT_RESET;
+ gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);
+ }
/* wait for reset. */
usleep_range(10, 20);
@@ -561,6 +599,12 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
gpu_write(gpu, VIVS_FE_COMMAND_CONTROL,
VIVS_FE_COMMAND_CONTROL_ENABLE |
VIVS_FE_COMMAND_CONTROL_PREFETCH(prefetch));
+
+ if (gpu->sec_mode == ETNA_SEC_KERNEL) {
+ gpu_write(gpu, VIVS_MMUv2_SEC_COMMAND_CONTROL,
+ VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
+ VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
+ }
}
static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
@@ -634,6 +678,12 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_BUS_CONFIG, bus_config);
}
+ if (gpu->sec_mode == ETNA_SEC_KERNEL) {
+ u32 val = gpu_read(gpu, VIVS_MMUv2_AHB_CONTROL);
+ val |= VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS;
+ gpu_write(gpu, VIVS_MMUv2_AHB_CONTROL, val);
+ }
+
/* setup the pulse eater */
etnaviv_gpu_setup_pulse_eater(gpu);
@@ -696,6 +746,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
gpu->identity.features &= ~chipFeatures_FAST_CLEAR;
}
+ /*
+ * On cores that support security features, we claim control over the
+ * security states.
+ */
+ if ((gpu->identity.minor_features7 & chipMinorFeatures7_BIT_SECURITY) &&
+ (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
+ gpu->sec_mode = ETNA_SEC_KERNEL;
+
ret = etnaviv_hw_reset(gpu);
if (ret) {
dev_err(gpu->dev, "GPU reset failed\n");
@@ -807,6 +865,8 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
verify_dma(gpu, &debug);
seq_puts(m, "\tfeatures\n");
+ seq_printf(m, "\t major_features: 0x%08x\n",
+ gpu->identity.features);
seq_printf(m, "\t minor_features0: 0x%08x\n",
gpu->identity.minor_features0);
seq_printf(m, "\t minor_features1: 0x%08x\n",
@@ -819,6 +879,18 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
gpu->identity.minor_features4);
seq_printf(m, "\t minor_features5: 0x%08x\n",
gpu->identity.minor_features5);
+ seq_printf(m, "\t minor_features6: 0x%08x\n",
+ gpu->identity.minor_features6);
+ seq_printf(m, "\t minor_features7: 0x%08x\n",
+ gpu->identity.minor_features7);
+ seq_printf(m, "\t minor_features8: 0x%08x\n",
+ gpu->identity.minor_features8);
+ seq_printf(m, "\t minor_features9: 0x%08x\n",
+ gpu->identity.minor_features9);
+ seq_printf(m, "\t minor_features10: 0x%08x\n",
+ gpu->identity.minor_features10);
+ seq_printf(m, "\t minor_features11: 0x%08x\n",
+ gpu->identity.minor_features11);
seq_puts(m, "\tspecs\n");
seq_printf(m, "\t stream_count: %d\n",
@@ -912,38 +984,24 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
}
#endif
-/*
- * Hangcheck detection for locked gpu:
- */
-static void recover_worker(struct work_struct *work)
+void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
{
- struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
- recover_work);
unsigned long flags;
unsigned int i = 0;
- dev_err(gpu->dev, "hangcheck recover!\n");
+ dev_err(gpu->dev, "recover hung GPU!\n");
if (pm_runtime_get_sync(gpu->dev) < 0)
return;
mutex_lock(&gpu->lock);
- /* Only catch the first event, or when manually re-armed */
- if (etnaviv_dump_core) {
- etnaviv_core_dump(gpu);
- etnaviv_dump_core = false;
- }
-
etnaviv_hw_reset(gpu);
/* complete all events, the GPU won't do it after the reset */
spin_lock_irqsave(&gpu->event_spinlock, flags);
- for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS) {
- dma_fence_signal(gpu->event[i].fence);
- gpu->event[i].fence = NULL;
+ for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
complete(&gpu->event_free);
- }
bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
spin_unlock_irqrestore(&gpu->event_spinlock, flags);
gpu->completed_fence = gpu->active_fence;
@@ -955,56 +1013,6 @@ static void recover_worker(struct work_struct *work)
mutex_unlock(&gpu->lock);
pm_runtime_mark_last_busy(gpu->dev);
pm_runtime_put_autosuspend(gpu->dev);
-
- /* Retire the buffer objects in a work */
- queue_work(gpu->wq, &gpu->retire_work);
-}
-
-static void hangcheck_timer_reset(struct etnaviv_gpu *gpu)
-{
- DBG("%s", dev_name(gpu->dev));
- mod_timer(&gpu->hangcheck_timer,
- round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES));
-}
-
-static void hangcheck_handler(struct timer_list *t)
-{
- struct etnaviv_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
- u32 fence = gpu->completed_fence;
- bool progress = false;
-
- if (fence != gpu->hangcheck_fence) {
- gpu->hangcheck_fence = fence;
- progress = true;
- }
-
- if (!progress) {
- u32 dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
- int change = dma_addr - gpu->hangcheck_dma_addr;
-
- if (change < 0 || change > 16) {
- gpu->hangcheck_dma_addr = dma_addr;
- progress = true;
- }
- }
-
- if (!progress && fence_after(gpu->active_fence, fence)) {
- dev_err(gpu->dev, "hangcheck detected gpu lockup!\n");
- dev_err(gpu->dev, " completed fence: %u\n", fence);
- dev_err(gpu->dev, " active fence: %u\n",
- gpu->active_fence);
- queue_work(gpu->wq, &gpu->recover_work);
- }
-
- /* if still more pending work, reset the hangcheck timer: */
- if (fence_after(gpu->active_fence, gpu->hangcheck_fence))
- hangcheck_timer_reset(gpu);
-}
-
-static void hangcheck_disable(struct etnaviv_gpu *gpu)
-{
- del_timer_sync(&gpu->hangcheck_timer);
- cancel_work_sync(&gpu->recover_work);
}
/* fence object management */
@@ -1080,54 +1088,6 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
return &f->base;
}
-int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
- unsigned int context, bool exclusive, bool explicit)
-{
- struct reservation_object *robj = etnaviv_obj->resv;
- struct reservation_object_list *fobj;
- struct dma_fence *fence;
- int i, ret;
-
- if (!exclusive) {
- ret = reservation_object_reserve_shared(robj);
- if (ret)
- return ret;
- }
-
- if (explicit)
- return 0;
-
- /*
- * If we have any shared fences, then the exclusive fence
- * should be ignored as it will already have been signalled.
- */
- fobj = reservation_object_get_list(robj);
- if (!fobj || fobj->shared_count == 0) {
- /* Wait on any existing exclusive fence which isn't our own */
- fence = reservation_object_get_excl(robj);
- if (fence && fence->context != context) {
- ret = dma_fence_wait(fence, true);
- if (ret)
- return ret;
- }
- }
-
- if (!exclusive || !fobj)
- return 0;
-
- for (i = 0; i < fobj->shared_count; i++) {
- fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(robj));
- if (fence->context != context) {
- ret = dma_fence_wait(fence, true);
- if (ret)
- return ret;
- }
- }
-
- return 0;
-}
-
/*
* event management:
*/
@@ -1194,67 +1154,47 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
/*
* Cmdstream submission/retirement:
*/
-
-static void retire_worker(struct work_struct *work)
-{
- struct etnaviv_gpu *gpu = container_of(work, struct etnaviv_gpu,
- retire_work);
- u32 fence = gpu->completed_fence;
- struct etnaviv_gem_submit *submit, *tmp;
- LIST_HEAD(retire_list);
-
- mutex_lock(&gpu->lock);
- list_for_each_entry_safe(submit, tmp, &gpu->active_submit_list, node) {
- if (!dma_fence_is_signaled(submit->out_fence))
- break;
-
- list_move(&submit->node, &retire_list);
- }
-
- gpu->retired_fence = fence;
-
- mutex_unlock(&gpu->lock);
-
- list_for_each_entry_safe(submit, tmp, &retire_list, node)
- etnaviv_submit_put(submit);
-}
-
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
- u32 fence, struct timespec *timeout)
+ u32 id, struct timespec *timeout)
{
+ struct dma_fence *fence;
int ret;
- if (fence_after(fence, gpu->next_fence)) {
- DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
- fence, gpu->next_fence);
- return -EINVAL;
- }
+ /*
+ * Look up the fence and take a reference. We might still find a fence
+ * whose refcount has already dropped to zero. dma_fence_get_rcu
+ * pretends we didn't find a fence in that case.
+ */
+ rcu_read_lock();
+ fence = idr_find(&gpu->fence_idr, id);
+ if (fence)
+ fence = dma_fence_get_rcu(fence);
+ rcu_read_unlock();
+
+ if (!fence)
+ return 0;
if (!timeout) {
/* No timeout was requested: just test for completion */
- ret = fence_completed(gpu, fence) ? 0 : -EBUSY;
+ ret = dma_fence_is_signaled(fence) ? 0 : -EBUSY;
} else {
unsigned long remaining = etnaviv_timeout_to_jiffies(timeout);
- ret = wait_event_interruptible_timeout(gpu->fence_event,
- fence_completed(gpu, fence),
- remaining);
- if (ret == 0) {
- DBG("timeout waiting for fence: %u (retired: %u completed: %u)",
- fence, gpu->retired_fence,
- gpu->completed_fence);
+ ret = dma_fence_wait_timeout(fence, true, remaining);
+ if (ret == 0)
ret = -ETIMEDOUT;
- } else if (ret != -ERESTARTSYS) {
+ else if (ret != -ERESTARTSYS)
ret = 0;
- }
}
+ dma_fence_put(fence);
return ret;
}
/*
 * Wait for an object to become inactive. This, on its own, is not race
- * free: the object is moved by the retire worker off the active list, and
+ * free: the object is moved by the scheduler off the active list, and
* then the iova is put. Moreover, the object could be re-submitted just
* after we notice that it's become inactive.
*
@@ -1343,16 +1283,19 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
/* add bo's to gpu's ring, and kick gpu: */
-int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_submit *submit)
+struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
{
+ struct etnaviv_gpu *gpu = submit->gpu;
+ struct dma_fence *gpu_fence;
unsigned int i, nr_events = 1, event[3];
int ret;
- ret = pm_runtime_get_sync(gpu->dev);
- if (ret < 0)
- return ret;
- submit->runtime_resumed = true;
+ if (!submit->runtime_resumed) {
+ ret = pm_runtime_get_sync(gpu->dev);
+ if (ret < 0)
+ return NULL;
+ submit->runtime_resumed = true;
+ }
/*
* if there are performance monitor requests we need to have
@@ -1367,21 +1310,20 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
ret = event_alloc(gpu, nr_events, event);
if (ret) {
DRM_ERROR("no free events\n");
- return ret;
+ return NULL;
}
mutex_lock(&gpu->lock);
- submit->out_fence = etnaviv_gpu_fence_alloc(gpu);
- if (!submit->out_fence) {
+ gpu_fence = etnaviv_gpu_fence_alloc(gpu);
+ if (!gpu_fence) {
for (i = 0; i < nr_events; i++)
event_free(gpu, event[i]);
- ret = -ENOMEM;
goto out_unlock;
}
- gpu->active_fence = submit->out_fence->seqno;
+ gpu->active_fence = gpu_fence->seqno;
if (submit->nr_pmrs) {
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre;
@@ -1390,8 +1332,8 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
etnaviv_sync_point_queue(gpu, event[1]);
}
- kref_get(&submit->refcount);
- gpu->event[event[0]].fence = submit->out_fence;
+ gpu->event[event[0]].fence = gpu_fence;
+ submit->cmdbuf.user_size = submit->cmdbuf.size - 8;
etnaviv_buffer_queue(gpu, submit->exec_state, event[0],
&submit->cmdbuf);
@@ -1402,15 +1344,10 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
etnaviv_sync_point_queue(gpu, event[2]);
}
- list_add_tail(&submit->node, &gpu->active_submit_list);
-
- hangcheck_timer_reset(gpu);
- ret = 0;
-
out_unlock:
mutex_unlock(&gpu->lock);
- return ret;
+ return gpu_fence;
}
static void sync_point_worker(struct work_struct *work)
@@ -1428,9 +1365,35 @@ static void sync_point_worker(struct work_struct *work)
etnaviv_gpu_start_fe(gpu, addr + 2, 2);
}
-/*
- * Init/Cleanup:
- */
+static void dump_mmu_fault(struct etnaviv_gpu *gpu)
+{
+ u32 status_reg, status;
+ int i;
+
+ if (gpu->sec_mode == ETNA_SEC_NONE)
+ status_reg = VIVS_MMUv2_STATUS;
+ else
+ status_reg = VIVS_MMUv2_SEC_STATUS;
+
+ status = gpu_read(gpu, status_reg);
+ dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);
+
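+	/*
+	 * Up to four exceptions can be pending in the status register; in
+	 * kernel security mode they all report through a single exception
+	 * address register instead of one per slot.
+	 */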
+ for (i = 0; i < 4; i++) {
+ u32 address_reg;
+
+ if (!(status & (VIVS_MMUv2_STATUS_EXCEPTION0__MASK << (i * 4))))
+ continue;
+
+ if (gpu->sec_mode == ETNA_SEC_NONE)
+ address_reg = VIVS_MMUv2_EXCEPTION_ADDR(i);
+ else
+ address_reg = VIVS_MMUv2_SEC_EXCEPTION_ADDR;
+
+ dev_err_ratelimited(gpu->dev, "MMU %d fault addr 0x%08x\n", i,
+ gpu_read(gpu, address_reg));
+ }
+}
+
static irqreturn_t irq_handler(int irq, void *data)
{
struct etnaviv_gpu *gpu = data;
@@ -1451,17 +1414,7 @@ static irqreturn_t irq_handler(int irq, void *data)
}
if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
- int i;
-
- dev_err_ratelimited(gpu->dev,
- "MMU fault status 0x%08x\n",
- gpu_read(gpu, VIVS_MMUv2_STATUS));
- for (i = 0; i < 4; i++) {
- dev_err_ratelimited(gpu->dev,
- "MMU %d fault addr 0x%08x\n",
- i, gpu_read(gpu,
- VIVS_MMUv2_EXCEPTION_ADDR(i)));
- }
+ dump_mmu_fault(gpu);
intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
}
@@ -1484,7 +1437,6 @@ static irqreturn_t irq_handler(int irq, void *data)
continue;
gpu->event[event].fence = NULL;
- dma_fence_signal(fence);
/*
* Events can be processed out of order. Eg,
@@ -1497,13 +1449,11 @@ static irqreturn_t irq_handler(int irq, void *data)
*/
if (fence_after(fence->seqno, gpu->completed_fence))
gpu->completed_fence = fence->seqno;
+ dma_fence_signal(fence);
event_free(gpu, event);
}
- /* Retire the buffer objects in a work */
- queue_work(gpu->wq, &gpu->retire_work);
-
ret = IRQ_HANDLED;
}
@@ -1514,6 +1464,12 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
{
int ret;
+ if (gpu->clk_reg) {
+ ret = clk_prepare_enable(gpu->clk_reg);
+ if (ret)
+ return ret;
+ }
+
if (gpu->clk_bus) {
ret = clk_prepare_enable(gpu->clk_bus);
if (ret)
@@ -1552,6 +1508,8 @@ static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
clk_disable_unprepare(gpu->clk_core);
if (gpu->clk_bus)
clk_disable_unprepare(gpu->clk_bus);
+ if (gpu->clk_reg)
+ clk_disable_unprepare(gpu->clk_reg);
return 0;
}
@@ -1675,41 +1633,49 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
if (!gpu->wq) {
- if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
- thermal_cooling_device_unregister(gpu->cooling);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_thermal;
}
+ ret = etnaviv_sched_init(gpu);
+ if (ret)
+ goto out_workqueue;
+
#ifdef CONFIG_PM
ret = pm_runtime_get_sync(gpu->dev);
#else
ret = etnaviv_gpu_clk_enable(gpu);
#endif
- if (ret < 0) {
- destroy_workqueue(gpu->wq);
- if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
- thermal_cooling_device_unregister(gpu->cooling);
- return ret;
- }
+ if (ret < 0)
+ goto out_sched;
+
gpu->drm = drm;
gpu->fence_context = dma_fence_context_alloc(1);
+ idr_init(&gpu->fence_idr);
spin_lock_init(&gpu->fence_spinlock);
- INIT_LIST_HEAD(&gpu->active_submit_list);
- INIT_WORK(&gpu->retire_work, retire_worker);
INIT_WORK(&gpu->sync_point_work, sync_point_worker);
- INIT_WORK(&gpu->recover_work, recover_worker);
init_waitqueue_head(&gpu->fence_event);
- timer_setup(&gpu->hangcheck_timer, hangcheck_handler, TIMER_DEFERRABLE);
-
priv->gpu[priv->num_gpus++] = gpu;
pm_runtime_mark_last_busy(gpu->dev);
pm_runtime_put_autosuspend(gpu->dev);
return 0;
+
+out_sched:
+ etnaviv_sched_fini(gpu);
+
+out_workqueue:
+ destroy_workqueue(gpu->wq);
+
+out_thermal:
+ if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
+ thermal_cooling_device_unregister(gpu->cooling);
+
+ return ret;
}
static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
@@ -1719,11 +1685,11 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
DBG("%s", dev_name(gpu->dev));
- hangcheck_disable(gpu);
-
flush_workqueue(gpu->wq);
destroy_workqueue(gpu->wq);
+ etnaviv_sched_fini(gpu);
+
#ifdef CONFIG_PM
pm_runtime_get_sync(gpu->dev);
pm_runtime_put_sync_suspend(gpu->dev);
@@ -1745,6 +1711,7 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
}
gpu->drm = NULL;
+ idr_destroy(&gpu->fence_idr);
if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
thermal_cooling_device_unregister(gpu->cooling);
@@ -1762,6 +1729,7 @@ static const struct of_device_id etnaviv_gpu_match[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, etnaviv_gpu_match);
static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
@@ -1775,6 +1743,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
gpu->dev = &pdev->dev;
mutex_init(&gpu->lock);
+ mutex_init(&gpu->fence_idr_lock);
/* Map registers: */
gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
@@ -1796,6 +1765,11 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
}
/* Get Clocks: */
+ gpu->clk_reg = devm_clk_get(&pdev->dev, "reg");
+ DBG("clk_reg: %p", gpu->clk_reg);
+ if (IS_ERR(gpu->clk_reg))
+ gpu->clk_reg = NULL;
+
gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
DBG("clk_bus: %p", gpu->clk_bus);
if (IS_ERR(gpu->clk_bus))
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 7623905210dc..3c3005501846 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -38,21 +38,17 @@ struct etnaviv_chip_identity {
/* Supported minor feature fields. */
u32 minor_features0;
-
- /* Supported minor feature 1 fields. */
u32 minor_features1;
-
- /* Supported minor feature 2 fields. */
u32 minor_features2;
-
- /* Supported minor feature 3 fields. */
u32 minor_features3;
-
- /* Supported minor feature 4 fields. */
u32 minor_features4;
-
- /* Supported minor feature 5 fields. */
u32 minor_features5;
+ u32 minor_features6;
+ u32 minor_features7;
+ u32 minor_features8;
+ u32 minor_features9;
+ u32 minor_features10;
+ u32 minor_features11;
/* Number of streams supported. */
u32 stream_count;
@@ -88,6 +84,12 @@ struct etnaviv_chip_identity {
u8 varyings_count;
};
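+/*
+ * Security modes of the GPU register interface: ETNA_SEC_KERNEL is selected
+ * when the AHB security feature bits are present and lets the kernel program
+ * the secure MMU state directly; ETNA_SEC_TZ is not handled by the MMU
+ * restore path yet.
+ */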
+enum etnaviv_sec_mode {
+ ETNA_SEC_NONE = 0,
+ ETNA_SEC_KERNEL,
+ ETNA_SEC_TZ
+};
+
struct etnaviv_event {
struct dma_fence *fence;
struct etnaviv_gem_submit *submit;
@@ -106,8 +108,10 @@ struct etnaviv_gpu {
struct device *dev;
struct mutex lock;
struct etnaviv_chip_identity identity;
+ enum etnaviv_sec_mode sec_mode;
struct etnaviv_file_private *lastctx;
struct workqueue_struct *wq;
+ struct drm_gpu_scheduler sched;
/* 'ring'-buffer: */
struct etnaviv_cmdbuf buffer;
@@ -122,23 +126,18 @@ struct etnaviv_gpu {
struct completion event_free;
spinlock_t event_spinlock;
- /* list of currently in-flight command buffers */
- struct list_head active_submit_list;
-
u32 idle_mask;
/* Fencing support */
+ struct mutex fence_idr_lock;
+ struct idr fence_idr;
u32 next_fence;
u32 active_fence;
u32 completed_fence;
- u32 retired_fence;
wait_queue_head_t fence_event;
u64 fence_context;
spinlock_t fence_spinlock;
- /* worker for handling active-list retiring: */
- struct work_struct retire_work;
-
/* worker for handling 'sync' points: */
struct work_struct sync_point_work;
int sync_point_event;
@@ -151,16 +150,10 @@ struct etnaviv_gpu {
/* Power Control: */
struct clk *clk_bus;
+ struct clk *clk_reg;
struct clk *clk_core;
struct clk *clk_shader;
- /* Hang Detction: */
-#define DRM_ETNAVIV_HANGCHECK_PERIOD 500 /* in ms */
-#define DRM_ETNAVIV_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_ETNAVIV_HANGCHECK_PERIOD)
- struct timer_list hangcheck_timer;
- u32 hangcheck_fence;
- u32 hangcheck_dma_addr;
- struct work_struct recover_work;
unsigned int freq_scale;
unsigned long base_rate_core;
unsigned long base_rate_shader;
@@ -181,29 +174,22 @@ static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
return fence_after_eq(gpu->completed_fence, fence);
}
-static inline bool fence_retired(struct etnaviv_gpu *gpu, u32 fence)
-{
- return fence_after_eq(gpu->retired_fence, fence);
-}
-
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
+bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif
-int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
- unsigned int context, bool exclusive, bool implicit);
-
+void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
u32 fence, struct timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
-int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
- struct etnaviv_gem_submit *submit);
+struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
new file mode 100644
index 000000000000..ea08bb38caaf
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2018 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "etnaviv_gpu.h"
+
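+/*
+ * Table of known chip identities: when the probed model/revision matches an
+ * entry, the whole identity struct is overridden with the values below.
+ */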
+static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
+ {
+ .model = 0x7000,
+ .revision = 0x6214,
+ .stream_count = 16,
+ .register_max = 64,
+ .thread_count = 1024,
+ .shader_core_count = 4,
+ .vertex_cache_size = 16,
+ .vertex_output_buffer_size = 1024,
+ .pixel_pipes = 2,
+ .instruction_count = 512,
+ .num_constants = 320,
+ .buffer_size = 0,
+ .varyings_count = 16,
+ .features = 0xe0287cad,
+ .minor_features0 = 0xc1799eff,
+ .minor_features1 = 0xfefbfad9,
+ .minor_features2 = 0xeb9d4fbf,
+ .minor_features3 = 0xedfffced,
+ .minor_features4 = 0xdb0dafc7,
+ .minor_features5 = 0xbb5ac333,
+ .minor_features6 = 0xfc8ee200,
+ .minor_features7 = 0x03fbfa6f,
+ .minor_features8 = 0x00ef0ef0,
+ .minor_features9 = 0x0edbf03c,
+ .minor_features10 = 0x90044250,
+ .minor_features11 = 0x00000024,
+ },
+};
+
+bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_chip_identity *ident = &gpu->identity;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) {
+ if (etnaviv_chip_identities[i].model == ident->model &&
+ etnaviv_chip_identities[i].revision == ident->revision) {
+ memcpy(ident, &etnaviv_chip_identities[i],
+ sizeof(*ident));
+ return true;
+ }
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
index 7a8c94731748..4b9b11ca6f03 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c
@@ -158,7 +158,7 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
-const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
+static const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
.free = etnaviv_iommuv1_domain_free,
.map = etnaviv_iommuv1_map,
.unmap = etnaviv_iommuv1_unmap,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index 1e956e266aa3..9752dbd5d28b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -40,6 +40,9 @@
struct etnaviv_iommuv2_domain {
struct etnaviv_iommu_domain base;
+ /* P(age) T(able) A(rray) */
+ u64 *pta_cpu;
+ dma_addr_t pta_dma;
/* M(aster) TLB aka first level pagetable */
u32 *mtlb_cpu;
dma_addr_t mtlb_dma;
@@ -114,6 +117,15 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
for (i = 0; i < SZ_4K / 4; i++)
*p++ = 0xdead55aa;
+ etnaviv_domain->pta_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
+ SZ_4K,
+ &etnaviv_domain->pta_dma,
+ GFP_KERNEL);
+ if (!etnaviv_domain->pta_cpu) {
+ ret = -ENOMEM;
+ goto fail_mem;
+ }
+
etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
SZ_4K,
&etnaviv_domain->mtlb_dma,
@@ -150,6 +162,11 @@ fail_mem:
etnaviv_domain->base.bad_page_cpu,
etnaviv_domain->base.bad_page_dma);
+ if (etnaviv_domain->pta_cpu)
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->pta_cpu,
+ etnaviv_domain->pta_dma);
+
if (etnaviv_domain->mtlb_cpu)
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->mtlb_cpu,
@@ -176,6 +193,10 @@ static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
etnaviv_domain->base.bad_page_dma);
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
+ etnaviv_domain->pta_cpu,
+ etnaviv_domain->pta_dma);
+
+ dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->mtlb_cpu,
etnaviv_domain->mtlb_dma);
@@ -216,7 +237,7 @@ static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
}
-void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(gpu->mmu->domain);
@@ -236,7 +257,60 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}
-const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
+static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
+{
+ struct etnaviv_iommuv2_domain *etnaviv_domain =
+ to_etnaviv_domain(gpu->mmu->domain);
+ u16 prefetch;
+
+	/* If the MMU is already enabled, the state is still there. */
+ if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
+ return;
+
+ gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
+ lower_32_bits(etnaviv_domain->pta_dma));
+ gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
+ upper_32_bits(etnaviv_domain->pta_dma));
+ gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);
+
+ gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
+ lower_32_bits(etnaviv_domain->base.bad_page_dma));
+ gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
+ lower_32_bits(etnaviv_domain->base.bad_page_dma));
+ gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
+ VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
+ upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
+ VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
+ upper_32_bits(etnaviv_domain->base.bad_page_dma)));
+
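+	/*
+	 * Point the first PTA entry at the master TLB; the FE command stream
+	 * kicked off below makes the MMU load it.
+	 */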
+ etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
+ VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
+
+ /* trigger a PTA load through the FE */
+ prefetch = etnaviv_buffer_config_pta(gpu);
+ etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
+ prefetch);
+ etnaviv_gpu_wait_idle(gpu, 100);
+
+ gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
+}
+
+void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
+{
+ switch (gpu->sec_mode) {
+ case ETNA_SEC_NONE:
+ etnaviv_iommuv2_restore_nonsec(gpu);
+ break;
+ case ETNA_SEC_KERNEL:
+ etnaviv_iommuv2_restore_sec(gpu);
+ break;
+ default:
+ WARN(1, "unhandled GPU security mode\n");
+ break;
+ }
+}
+
+static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
.free = etnaviv_iommuv2_domain_free,
.map = etnaviv_iommuv2_map,
.unmap = etnaviv_iommuv2_unmap,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index d113fe06e6b5..49e049713a52 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -29,7 +29,7 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
size_t pgsize = SZ_4K;
if (!IS_ALIGNED(iova | size, pgsize)) {
- pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
+ pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
iova, size, pgsize);
return;
}
@@ -54,7 +54,7 @@ static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
int ret = 0;
if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
- pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
+ pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
iova, &paddr, size, pgsize);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
new file mode 100644
index 000000000000..6cf0775dbcd7
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kthread.h>
+
+#include "etnaviv_drv.h"
+#include "etnaviv_dump.h"
+#include "etnaviv_gem.h"
+#include "etnaviv_gpu.h"
+#include "etnaviv_sched.h"
+
+static int etnaviv_job_hang_limit;
+module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
+static int etnaviv_hw_jobs_limit = 4;
+module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);
+
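+/*
+ * Hand the scheduler the next unsignalled fence this job still depends on:
+ * first the explicit in-fence, then each BO's exclusive and shared fences.
+ * A NULL return tells the scheduler that all dependencies are met.
+ */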
+static struct dma_fence *
+etnaviv_sched_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *entity)
+{
+ struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+ struct dma_fence *fence;
+ int i;
+
+ if (unlikely(submit->in_fence)) {
+ fence = submit->in_fence;
+ submit->in_fence = NULL;
+
+ if (!dma_fence_is_signaled(fence))
+ return fence;
+
+ dma_fence_put(fence);
+ }
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
+ int j;
+
+ if (bo->excl) {
+ fence = bo->excl;
+ bo->excl = NULL;
+
+ if (!dma_fence_is_signaled(fence))
+ return fence;
+
+ dma_fence_put(fence);
+ }
+
+ for (j = 0; j < bo->nr_shared; j++) {
+ if (!bo->shared[j])
+ continue;
+
+ fence = bo->shared[j];
+ bo->shared[j] = NULL;
+
+ if (!dma_fence_is_signaled(fence))
+ return fence;
+
+ dma_fence_put(fence);
+ }
+ kfree(bo->shared);
+ bo->nr_shared = 0;
+ bo->shared = NULL;
+ }
+
+ return NULL;
+}
+
+static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
+{
+ struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+ struct dma_fence *fence = NULL;
+
+ if (likely(!sched_job->s_fence->finished.error))
+ fence = etnaviv_gpu_submit(submit);
+ else
+ dev_dbg(submit->gpu->dev, "skipping bad job\n");
+
+ return fence;
+}
+
+static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
+{
+ struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+ struct etnaviv_gpu *gpu = submit->gpu;
+
+ /* block scheduler */
+ kthread_park(gpu->sched.thread);
+ drm_sched_hw_job_reset(&gpu->sched, sched_job);
+
+ /* get the GPU back into the init state */
+ etnaviv_core_dump(gpu);
+ etnaviv_gpu_recover_hang(gpu);
+
+ /* restart scheduler after GPU is usable again */
+ drm_sched_job_recovery(&gpu->sched);
+ kthread_unpark(gpu->sched.thread);
+}
+
+static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
+{
+ struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
+
+ etnaviv_submit_put(submit);
+}
+
+static const struct drm_sched_backend_ops etnaviv_sched_ops = {
+ .dependency = etnaviv_sched_dependency,
+ .run_job = etnaviv_sched_run_job,
+ .timedout_job = etnaviv_sched_timedout_job,
+ .free_job = etnaviv_sched_free_job,
+};
+
+int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
+ struct etnaviv_gem_submit *submit)
+{
+ int ret;
+
+ ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
+ sched_entity, submit->cmdbuf.ctx);
+ if (ret)
+ return ret;
+
+ submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
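+	/*
+	 * The idr id, not the fence seqno, is what identifies this submit to
+	 * later waits via etnaviv_gpu_wait_fence_interruptible().
+	 */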
+ mutex_lock(&submit->gpu->fence_idr_lock);
+ submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
+ submit->out_fence, 0,
+ INT_MAX, GFP_KERNEL);
+ mutex_unlock(&submit->gpu->fence_idr_lock);
+ if (submit->out_fence_id < 0)
+ return -ENOMEM;
+
+ /* the scheduler holds on to the job now */
+ kref_get(&submit->refcount);
+
+ drm_sched_entity_push_job(&submit->sched_job, sched_entity);
+
+ return 0;
+}
+
+int etnaviv_sched_init(struct etnaviv_gpu *gpu)
+{
+ int ret;
+
+ ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
+ etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
+ msecs_to_jiffies(500), dev_name(gpu->dev));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
+{
+ drm_sched_fini(&gpu->sched);
+}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.h b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
new file mode 100644
index 000000000000..097635fa78ae
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2017 Etnaviv Project
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ETNAVIV_SCHED_H__
+#define __ETNAVIV_SCHED_H__
+
+#include <drm/gpu_scheduler.h>
+
+struct etnaviv_gpu;
+
+static inline
+struct etnaviv_gem_submit *to_etnaviv_submit(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct etnaviv_gem_submit, sched_job);
+}
+
+int etnaviv_sched_init(struct etnaviv_gpu *gpu);
+void etnaviv_sched_fini(struct etnaviv_gpu *gpu);
+int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
+ struct etnaviv_gem_submit *submit);
+
+#endif /* __ETNAVIV_SCHED_H__ */
diff --git a/drivers/gpu/drm/etnaviv/state.xml.h b/drivers/gpu/drm/etnaviv/state.xml.h
index c27c1484cfa9..421cb7cc0053 100644
--- a/drivers/gpu/drm/etnaviv/state.xml.h
+++ b/drivers/gpu/drm/etnaviv/state.xml.h
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0 */
#ifndef STATE_XML
#define STATE_XML
@@ -9,14 +8,40 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- state.xml ( 18882 bytes, from 2015-03-25 11:42:32)
-- common.xml ( 18437 bytes, from 2015-03-25 11:27:41)
-- state_hi.xml ( 23420 bytes, from 2015-03-25 11:47:21)
-- state_2d.xml ( 51549 bytes, from 2015-03-25 11:25:06)
-- state_3d.xml ( 54600 bytes, from 2015-03-25 11:25:19)
-- state_vg.xml ( 5973 bytes, from 2015-03-25 11:26:01)
-
-Copyright (C) 2015
+- state.xml ( 26087 bytes, from 2017-12-18 16:51:59)
+- common.xml ( 35468 bytes, from 2018-01-22 13:48:54)
+- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59)
+- state_hi.xml ( 30232 bytes, from 2018-02-15 15:48:01)
+- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
+- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56)
+- state_3d.xml ( 79992 bytes, from 2017-12-18 16:51:59)
+- state_blt.xml ( 13405 bytes, from 2017-12-18 16:51:59)
+- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56)
+
+Copyright (C) 2012-2017 by the following authors:
+- Wladimir J. van der Laan <laanwj@gmail.com>
+- Christian Gmeiner <christian.gmeiner@gmail.com>
+- Lucas Stach <l.stach@pengutronix.de>
+- Russell King <rmk@arm.linux.org.uk>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sub license,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
*/
@@ -24,9 +49,25 @@ Copyright (C) 2015
#define VARYING_COMPONENT_USE_USED 0x00000001
#define VARYING_COMPONENT_USE_POINTCOORD_X 0x00000002
#define VARYING_COMPONENT_USE_POINTCOORD_Y 0x00000003
+#define FE_DATA_TYPE_BYTE 0x00000000
+#define FE_DATA_TYPE_UNSIGNED_BYTE 0x00000001
+#define FE_DATA_TYPE_SHORT 0x00000002
+#define FE_DATA_TYPE_UNSIGNED_SHORT 0x00000003
+#define FE_DATA_TYPE_INT 0x00000004
+#define FE_DATA_TYPE_UNSIGNED_INT 0x00000005
+#define FE_DATA_TYPE_FLOAT 0x00000008
+#define FE_DATA_TYPE_HALF_FLOAT 0x00000009
+#define FE_DATA_TYPE_FIXED 0x0000000b
+#define FE_DATA_TYPE_INT_10_10_10_2 0x0000000c
+#define FE_DATA_TYPE_UNSIGNED_INT_10_10_10_2 0x0000000d
+#define FE_DATA_TYPE_BYTE_I 0x0000000e
+#define FE_DATA_TYPE_SHORT_I 0x0000000f
#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK 0x000000ff
#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT 0
#define FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE(x) (((x) << FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__SHIFT) & FE_VERTEX_STREAM_CONTROL_VERTEX_STRIDE__MASK)
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_DIVISOR__MASK 0x00ff0000
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_DIVISOR__SHIFT 16
+#define FE_VERTEX_STREAM_CONTROL_VERTEX_DIVISOR(x) (((x) << FE_VERTEX_STREAM_CONTROL_VERTEX_DIVISOR__SHIFT) & FE_VERTEX_STREAM_CONTROL_VERTEX_DIVISOR__MASK)
#define VIVS_FE 0x00000000
#define VIVS_FE_VERTEX_ELEMENT_CONFIG(i0) (0x00000600 + 0x4*(i0))
@@ -34,17 +75,7 @@ Copyright (C) 2015
#define VIVS_FE_VERTEX_ELEMENT_CONFIG__LEN 0x00000010
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__MASK 0x0000000f
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__SHIFT 0
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_BYTE 0x00000000
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_BYTE 0x00000001
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_SHORT 0x00000002
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_SHORT 0x00000003
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT 0x00000004
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT 0x00000005
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FLOAT 0x00000008
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_HALF_FLOAT 0x00000009
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_FIXED 0x0000000b
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_INT_10_10_10_2 0x0000000c
-#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE_UNSIGNED_INT_10_10_10_2 0x0000000d
+#define VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_TYPE__MASK)
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK 0x00000030
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT 4
#define VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN(x) (((x) << VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__SHIFT) & VIVS_FE_VERTEX_ELEMENT_CONFIG_ENDIAN__MASK)
@@ -76,6 +107,7 @@ Copyright (C) 2015
#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_CHAR 0x00000000
#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_SHORT 0x00000001
#define VIVS_FE_INDEX_STREAM_CONTROL_TYPE_UNSIGNED_INT 0x00000002
+#define VIVS_FE_INDEX_STREAM_CONTROL_PRIMITIVE_RESTART 0x00000100
#define VIVS_FE_VERTEX_STREAM_BASE_ADDR 0x0000064c
@@ -151,6 +183,8 @@ Copyright (C) 2015
#define VIVS_FE_AUTO_FLUSH 0x00000670
+#define VIVS_FE_PRIMITIVE_RESTART_INDEX 0x00000674
+
#define VIVS_FE_UNK00678 0x00000678
#define VIVS_FE_UNK0067C 0x0000067c
@@ -163,17 +197,40 @@ Copyright (C) 2015
#define VIVS_FE_VERTEX_STREAMS_CONTROL(i0) (0x000006a0 + 0x4*(i0))
-#define VIVS_FE_UNK00700(i0) (0x00000700 + 0x4*(i0))
-#define VIVS_FE_UNK00700__ESIZE 0x00000004
-#define VIVS_FE_UNK00700__LEN 0x00000010
+#define VIVS_FE_GENERIC_ATTRIB(i0) (0x00000000 + 0x4*(i0))
+#define VIVS_FE_GENERIC_ATTRIB__ESIZE 0x00000004
+#define VIVS_FE_GENERIC_ATTRIB__LEN 0x00000010
+
+#define VIVS_FE_GENERIC_ATTRIB_UNK006C0(i0) (0x000006c0 + 0x4*(i0))
+
+#define VIVS_FE_GENERIC_ATTRIB_UNK00700(i0) (0x00000700 + 0x4*(i0))
+
+#define VIVS_FE_GENERIC_ATTRIB_UNK00740(i0) (0x00000740 + 0x4*(i0))
+
+#define VIVS_FE_GENERIC_ATTRIB_SCALE(i0) (0x00000780 + 0x4*(i0))
+
+#define VIVS_FE_HALTI5_UNK007C4 0x000007c4
+
+#define VIVS_FE_HALTI5_UNK007D0(i0) (0x000007d0 + 0x4*(i0))
+#define VIVS_FE_HALTI5_UNK007D0__ESIZE 0x00000004
+#define VIVS_FE_HALTI5_UNK007D0__LEN 0x00000002
+
+#define VIVS_FE_HALTI5_UNK007D8 0x000007d8
+
+#define VIVS_FE_DESC_START 0x000007dc
+
+#define VIVS_FE_DESC_END 0x000007e0
+
+#define VIVS_FE_DESC_AVAIL 0x000007e4
+#define VIVS_FE_DESC_AVAIL_COUNT__MASK 0x0000007f
+#define VIVS_FE_DESC_AVAIL_COUNT__SHIFT 0
+#define VIVS_FE_DESC_AVAIL_COUNT(x) (((x) << VIVS_FE_DESC_AVAIL_COUNT__SHIFT) & VIVS_FE_DESC_AVAIL_COUNT__MASK)
+
+#define VIVS_FE_FENCE_WAIT_DATA_LOW 0x000007e8
-#define VIVS_FE_UNK00740(i0) (0x00000740 + 0x4*(i0))
-#define VIVS_FE_UNK00740__ESIZE 0x00000004
-#define VIVS_FE_UNK00740__LEN 0x00000010
+#define VIVS_FE_FENCE_WAIT_DATA_HIGH 0x000007f4
-#define VIVS_FE_UNK00780(i0) (0x00000780 + 0x4*(i0))
-#define VIVS_FE_UNK00780__ESIZE 0x00000004
-#define VIVS_FE_UNK00780__LEN 0x00000010
+#define VIVS_FE_ROBUSTNESS_UNK007F8 0x000007f8
#define VIVS_GL 0x00000000
@@ -188,6 +245,7 @@ Copyright (C) 2015
#define VIVS_GL_EVENT_EVENT_ID(x) (((x) << VIVS_GL_EVENT_EVENT_ID__SHIFT) & VIVS_GL_EVENT_EVENT_ID__MASK)
#define VIVS_GL_EVENT_FROM_FE 0x00000020
#define VIVS_GL_EVENT_FROM_PE 0x00000040
+#define VIVS_GL_EVENT_FROM_BLT 0x00000080
#define VIVS_GL_EVENT_SOURCE__MASK 0x00001f00
#define VIVS_GL_EVENT_SOURCE__SHIFT 8
#define VIVS_GL_EVENT_SOURCE(x) (((x) << VIVS_GL_EVENT_SOURCE__SHIFT) & VIVS_GL_EVENT_SOURCE__MASK)
@@ -199,6 +257,9 @@ Copyright (C) 2015
#define VIVS_GL_SEMAPHORE_TOKEN_TO__MASK 0x00001f00
#define VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT 8
#define VIVS_GL_SEMAPHORE_TOKEN_TO(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_TO__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_TO__MASK)
+#define VIVS_GL_SEMAPHORE_TOKEN_UNK28__MASK 0x30000000
+#define VIVS_GL_SEMAPHORE_TOKEN_UNK28__SHIFT 28
+#define VIVS_GL_SEMAPHORE_TOKEN_UNK28(x) (((x) << VIVS_GL_SEMAPHORE_TOKEN_UNK28__SHIFT) & VIVS_GL_SEMAPHORE_TOKEN_UNK28__MASK)
#define VIVS_GL_FLUSH_CACHE 0x0000380c
#define VIVS_GL_FLUSH_CACHE_DEPTH 0x00000001
@@ -208,6 +269,10 @@ Copyright (C) 2015
#define VIVS_GL_FLUSH_CACHE_TEXTUREVS 0x00000010
#define VIVS_GL_FLUSH_CACHE_SHADER_L1 0x00000020
#define VIVS_GL_FLUSH_CACHE_SHADER_L2 0x00000040
+#define VIVS_GL_FLUSH_CACHE_UNK10 0x00000400
+#define VIVS_GL_FLUSH_CACHE_UNK11 0x00000800
+#define VIVS_GL_FLUSH_CACHE_DESCRIPTOR_UNK12 0x00001000
+#define VIVS_GL_FLUSH_CACHE_DESCRIPTOR_UNK13 0x00002000
#define VIVS_GL_FLUSH_MMU 0x00003810
#define VIVS_GL_FLUSH_MMU_FLUSH_FEMMU 0x00000001
@@ -244,30 +309,8 @@ Copyright (C) 2015
#define VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM(x) (((x) << VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__SHIFT) & VIVS_GL_VARYING_TOTAL_COMPONENTS_NUM__MASK)
#define VIVS_GL_VARYING_NUM_COMPONENTS 0x00003820
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK 0x00000007
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT 0
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR0(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR0__MASK)
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK 0x00000070
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT 4
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR1(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR1__MASK)
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK 0x00000700
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT 8
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR2(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR2__MASK)
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK 0x00007000
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT 12
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR3(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR3__MASK)
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK 0x00070000
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT 16
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR4(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR4__MASK)
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK 0x00700000
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT 20
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR5(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR5__MASK)
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK 0x07000000
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT 24
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR6(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR6__MASK)
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK 0x70000000
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT 28
-#define VIVS_GL_VARYING_NUM_COMPONENTS_VAR7(x) (((x) << VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__SHIFT) & VIVS_GL_VARYING_NUM_COMPONENTS_VAR7__MASK)
+
+#define VIVS_GL_OCCLUSION_QUERY_ADDR 0x00003824
#define VIVS_GL_VARYING_COMPONENT_USE(i0) (0x00003828 + 0x4*(i0))
#define VIVS_GL_VARYING_COMPONENT_USE__ESIZE 0x00000004
@@ -321,6 +364,10 @@ Copyright (C) 2015
#define VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT 30
#define VIVS_GL_VARYING_COMPONENT_USE_COMP15(x) (((x) << VIVS_GL_VARYING_COMPONENT_USE_COMP15__SHIFT) & VIVS_GL_VARYING_COMPONENT_USE_COMP15__MASK)
+#define VIVS_GL_UNK0382C 0x0000382c
+
+#define VIVS_GL_OCCLUSION_QUERY_CONTROL 0x00003830
+
#define VIVS_GL_UNK03834 0x00003834
#define VIVS_GL_UNK03838 0x00003838
@@ -332,8 +379,58 @@ Copyright (C) 2015
#define VIVS_GL_CONTEXT_POINTER 0x00003850
+#define VIVS_GL_UNK03854 0x00003854
+
+#define VIVS_GL_BUG_FIXES 0x00003860
+
+#define VIVS_GL_FENCE_OUT_ADDRESS 0x00003868
+
+#define VIVS_GL_FENCE_OUT_DATA_LOW 0x0000386c
+
+#define VIVS_GL_HALTI5_UNK03884 0x00003884
+
+#define VIVS_GL_HALTI5_SH_SPECIALS 0x00003888
+#define VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT__MASK 0x0000007f
+#define VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT__SHIFT 0
+#define VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT(x) (((x) << VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT__SHIFT) & VIVS_GL_HALTI5_SH_SPECIALS_VS_PSIZE_OUT__MASK)
+#define VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN__MASK 0x00007f00
+#define VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN__SHIFT 8
+#define VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN(x) (((x) << VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN__SHIFT) & VIVS_GL_HALTI5_SH_SPECIALS_PS_PCOORD_IN__MASK)
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK16__MASK 0x007f0000
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK16__SHIFT 16
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK16(x) (((x) << VIVS_GL_HALTI5_SH_SPECIALS_UNK16__SHIFT) & VIVS_GL_HALTI5_SH_SPECIALS_UNK16__MASK)
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK24__MASK 0xff000000
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK24__SHIFT 24
+#define VIVS_GL_HALTI5_SH_SPECIALS_UNK24(x) (((x) << VIVS_GL_HALTI5_SH_SPECIALS_UNK24__SHIFT) & VIVS_GL_HALTI5_SH_SPECIALS_UNK24__MASK)
+
+#define VIVS_GL_GS_UNK0388C 0x0000388c
+
+#define VIVS_GL_FENCE_OUT_DATA_HIGH 0x00003898
+
+#define VIVS_GL_SHADER_INDEX 0x0000389c
+
+#define VIVS_GL_GS_UNK038A0(i0) (0x000038a0 + 0x4*(i0))
+#define VIVS_GL_GS_UNK038A0__ESIZE 0x00000004
+#define VIVS_GL_GS_UNK038A0__LEN 0x00000008
+
+#define VIVS_GL_HALTI5_UNK038C0(i0) (0x000038c0 + 0x4*(i0))
+#define VIVS_GL_HALTI5_UNK038C0__ESIZE 0x00000004
+#define VIVS_GL_HALTI5_UNK038C0__LEN 0x00000010
+
+#define VIVS_GL_SECURITY_UNK3900 0x00003900
+
+#define VIVS_GL_SECURITY_UNK3904 0x00003904
+
#define VIVS_GL_UNK03A00 0x00003a00
+#define VIVS_GL_UNK03A04 0x00003a04
+
+#define VIVS_GL_UNK03A08 0x00003a08
+
+#define VIVS_GL_UNK03A0C 0x00003a0c
+
+#define VIVS_GL_UNK03A10 0x00003a10
+
#define VIVS_GL_STALL_TOKEN 0x00003c00
#define VIVS_GL_STALL_TOKEN_FROM__MASK 0x0000001f
#define VIVS_GL_STALL_TOKEN_FROM__SHIFT 0
@@ -344,6 +441,59 @@ Copyright (C) 2015
#define VIVS_GL_STALL_TOKEN_FLIP0 0x40000000
#define VIVS_GL_STALL_TOKEN_FLIP1 0x80000000
+#define VIVS_NFE 0x00000000
+
+#define VIVS_NFE_VERTEX_STREAMS(i0) (0x00000000 + 0x4*(i0))
+#define VIVS_NFE_VERTEX_STREAMS__ESIZE 0x00000004
+#define VIVS_NFE_VERTEX_STREAMS__LEN 0x00000010
+
+#define VIVS_NFE_VERTEX_STREAMS_BASE_ADDR(i0) (0x00014600 + 0x4*(i0))
+
+#define VIVS_NFE_VERTEX_STREAMS_CONTROL(i0) (0x00014640 + 0x4*(i0))
+
+#define VIVS_NFE_VERTEX_STREAMS_UNK14680(i0) (0x00014680 + 0x4*(i0))
+
+#define VIVS_NFE_VERTEX_STREAMS_ROBUSTNESS_UNK146C0(i0) (0x000146c0 + 0x4*(i0))
+
+#define VIVS_NFE_GENERIC_ATTRIB(i0) (0x00000000 + 0x4*(i0))
+#define VIVS_NFE_GENERIC_ATTRIB__ESIZE 0x00000004
+#define VIVS_NFE_GENERIC_ATTRIB__LEN 0x00000020
+
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0(i0) (0x00017800 + 0x4*(i0))
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_TYPE__MASK 0x0000000f
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_TYPE__SHIFT 0
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_TYPE(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG0_TYPE__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG0_TYPE__MASK)
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN__MASK 0x00000030
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN__SHIFT 4
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG0_ENDIAN__MASK)
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM__MASK 0x00000700
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM__SHIFT 8
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG0_STREAM__MASK)
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM__MASK 0x00003000
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM__SHIFT 12
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NUM__MASK)
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NORMALIZE__MASK 0x0000c000
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NORMALIZE__SHIFT 14
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NORMALIZE_OFF 0x00000000
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_NORMALIZE_ON 0x00008000
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START__MASK 0x00ff0000
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START__SHIFT 16
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG0_START__MASK)
+
+#define VIVS_NFE_GENERIC_ATTRIB_UNK17880(i0) (0x00017880 + 0x4*(i0))
+
+#define VIVS_NFE_GENERIC_ATTRIB_UNK17900(i0) (0x00017900 + 0x4*(i0))
+
+#define VIVS_NFE_GENERIC_ATTRIB_UNK17980(i0) (0x00017980 + 0x4*(i0))
+
+#define VIVS_NFE_GENERIC_ATTRIB_SCALE(i0) (0x00017a00 + 0x4*(i0))
+
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG1(i0) (0x00017a80 + 0x4*(i0))
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END__MASK 0x000000ff
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END__SHIFT 0
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END(x) (((x) << VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END__SHIFT) & VIVS_NFE_GENERIC_ATTRIB_CONFIG1_END__MASK)
+#define VIVS_NFE_GENERIC_ATTRIB_CONFIG1_NONCONSECUTIVE 0x00000800
+
#define VIVS_DUMMY 0x00000000
#define VIVS_DUMMY_DUMMY 0x0003fffc
diff --git a/drivers/gpu/drm/etnaviv/state_3d.xml.h b/drivers/gpu/drm/etnaviv/state_3d.xml.h
index 73a97d35c51b..ebbd4fcf3096 100644
--- a/drivers/gpu/drm/etnaviv/state_3d.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_3d.xml.h
@@ -7,4 +7,9 @@
#define VIVS_TS_FLUSH_CACHE 0x00001650
#define VIVS_TS_FLUSH_CACHE_FLUSH 0x00000001
+#define VIVS_NTE_DESCRIPTOR_FLUSH 0x00014c44
+#define VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__MASK 0xf0000000
+#define VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__SHIFT 28
+#define VIVS_NTE_DESCRIPTOR_FLUSH_UNK28(x) (((x) << VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__SHIFT) & VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__MASK)
+
#endif /* STATE_3D_XML */
diff --git a/drivers/gpu/drm/etnaviv/state_blt.xml.h b/drivers/gpu/drm/etnaviv/state_blt.xml.h
new file mode 100644
index 000000000000..daae55995def
--- /dev/null
+++ b/drivers/gpu/drm/etnaviv/state_blt.xml.h
@@ -0,0 +1,52 @@
+#ifndef STATE_BLT_XML
+#define STATE_BLT_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- state.xml ( 26087 bytes, from 2017-12-18 16:51:59)
+- common.xml ( 35468 bytes, from 2018-01-22 13:48:54)
+- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59)
+- state_hi.xml ( 30232 bytes, from 2018-02-15 15:48:01)
+- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
+- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56)
+- state_3d.xml ( 79992 bytes, from 2017-12-18 16:51:59)
+- state_blt.xml ( 13405 bytes, from 2017-12-18 16:51:59)
+- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56)
+
+Copyright (C) 2012-2017 by the following authors:
+- Wladimir J. van der Laan <laanwj@gmail.com>
+- Christian Gmeiner <christian.gmeiner@gmail.com>
+- Lucas Stach <l.stach@pengutronix.de>
+- Russell King <rmk@arm.linux.org.uk>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sub license,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+*/
+
+/* This is a cut-down version of the state_blt.xml.h file */
+
+#define VIVS_BLT_ENABLE 0x000140b8
+#define VIVS_BLT_ENABLE_ENABLE 0x00000001
+
+#endif /* STATE_BLT_XML */
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 60808daf7e8d..41d8da2b6f4f 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -1,4 +1,3 @@
-/* SPDX-License-Identifier: GPL-2.0 */
#ifndef STATE_HI_XML
#define STATE_HI_XML
@@ -9,10 +8,40 @@ http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
-- state_hi.xml ( 25620 bytes, from 2016-08-19 22:07:37)
-- common.xml ( 20583 bytes, from 2016-06-07 05:22:38)
-
-Copyright (C) 2016
+- state.xml ( 26087 bytes, from 2017-12-18 16:51:59)
+- common.xml ( 35468 bytes, from 2018-01-22 13:48:54)
+- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59)
+- state_hi.xml ( 30232 bytes, from 2018-02-15 15:48:01)
+- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
+- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56)
+- state_3d.xml ( 79992 bytes, from 2017-12-18 16:51:59)
+- state_blt.xml ( 13405 bytes, from 2017-12-18 16:51:59)
+- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56)
+
+Copyright (C) 2012-2018 by the following authors:
+- Wladimir J. van der Laan <laanwj@gmail.com>
+- Christian Gmeiner <christian.gmeiner@gmail.com>
+- Lucas Stach <l.stach@pengutronix.de>
+- Russell King <rmk@arm.linux.org.uk>
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sub license,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
*/
@@ -192,6 +221,9 @@ Copyright (C) 2016
#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT 0
#define VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT(x) (((x) << VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__SHIFT) & VIVS_HI_CHIP_SPECS_3_GPU_CORE_COUNT__MASK)
+#define VIVS_HI_COMPRESSION_FLAGS 0x00000090
+#define VIVS_HI_COMPRESSION_FLAGS_DEC300 0x00000040
+
#define VIVS_HI_CHIP_MINOR_FEATURE_4 0x00000094
#define VIVS_HI_CHIP_SPECS_4 0x0000009c
@@ -203,6 +235,10 @@ Copyright (C) 2016
#define VIVS_HI_CHIP_PRODUCT_ID 0x000000a8
+#define VIVS_HI_BLT_INTR 0x000000d4
+
+#define VIVS_HI_AUXBIT 0x000000ec
+
#define VIVS_PM 0x00000000
#define VIVS_PM_POWER_CONTROLS 0x00000100
@@ -239,6 +275,17 @@ Copyright (C) 2016
#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_TX 0x00000080
#define VIVS_PM_PULSE_EATER 0x0000010c
+#define VIVS_PM_PULSE_EATER_DISABLE 0x00000001
+#define VIVS_PM_PULSE_EATER_DVFS_PERIOD__MASK 0x0000ff00
+#define VIVS_PM_PULSE_EATER_DVFS_PERIOD__SHIFT 8
+#define VIVS_PM_PULSE_EATER_DVFS_PERIOD(x) (((x) << VIVS_PM_PULSE_EATER_DVFS_PERIOD__SHIFT) & VIVS_PM_PULSE_EATER_DVFS_PERIOD__MASK)
+#define VIVS_PM_PULSE_EATER_UNK16 0x00010000
+#define VIVS_PM_PULSE_EATER_UNK17 0x00020000
+#define VIVS_PM_PULSE_EATER_INTERNAL_DFS 0x00040000
+#define VIVS_PM_PULSE_EATER_UNK19 0x00080000
+#define VIVS_PM_PULSE_EATER_UNK20 0x00100000
+#define VIVS_PM_PULSE_EATER_UNK22 0x00400000
+#define VIVS_PM_PULSE_EATER_UNK23 0x00800000
#define VIVS_MMUv2 0x00000000
@@ -280,6 +327,68 @@ Copyright (C) 2016
#define VIVS_MMUv2_EXCEPTION_ADDR__ESIZE 0x00000004
#define VIVS_MMUv2_EXCEPTION_ADDR__LEN 0x00000004
+#define VIVS_MMUv2_PROFILE_BLT_READ 0x000001a4
+
+#define VIVS_MMUv2_PTA_CONFIG 0x000001ac
+#define VIVS_MMUv2_PTA_CONFIG_INDEX__MASK 0x0000ffff
+#define VIVS_MMUv2_PTA_CONFIG_INDEX__SHIFT 0
+#define VIVS_MMUv2_PTA_CONFIG_INDEX(x) (((x) << VIVS_MMUv2_PTA_CONFIG_INDEX__SHIFT) & VIVS_MMUv2_PTA_CONFIG_INDEX__MASK)
+#define VIVS_MMUv2_PTA_CONFIG_UNK16 0x00010000
+
+#define VIVS_MMUv2_AXI_POLICY(i0) (0x000001c0 + 0x4*(i0))
+#define VIVS_MMUv2_AXI_POLICY__ESIZE 0x00000004
+#define VIVS_MMUv2_AXI_POLICY__LEN 0x00000008
+
+#define VIVS_MMUv2_SEC_EXCEPTION_ADDR 0x00000380
+
+#define VIVS_MMUv2_SEC_STATUS 0x00000384
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION0__MASK 0x00000003
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION0__SHIFT 0
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION0(x) (((x) << VIVS_MMUv2_SEC_STATUS_EXCEPTION0__SHIFT) & VIVS_MMUv2_SEC_STATUS_EXCEPTION0__MASK)
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION1__MASK 0x00000030
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION1__SHIFT 4
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION1(x) (((x) << VIVS_MMUv2_SEC_STATUS_EXCEPTION1__SHIFT) & VIVS_MMUv2_SEC_STATUS_EXCEPTION1__MASK)
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION2__MASK 0x00000300
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION2__SHIFT 8
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION2(x) (((x) << VIVS_MMUv2_SEC_STATUS_EXCEPTION2__SHIFT) & VIVS_MMUv2_SEC_STATUS_EXCEPTION2__MASK)
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION3__MASK 0x00003000
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION3__SHIFT 12
+#define VIVS_MMUv2_SEC_STATUS_EXCEPTION3(x) (((x) << VIVS_MMUv2_SEC_STATUS_EXCEPTION3__SHIFT) & VIVS_MMUv2_SEC_STATUS_EXCEPTION3__MASK)
+
+#define VIVS_MMUv2_SEC_CONTROL 0x00000388
+#define VIVS_MMUv2_SEC_CONTROL_ENABLE 0x00000001
+
+#define VIVS_MMUv2_PTA_ADDRESS_LOW 0x0000038c
+
+#define VIVS_MMUv2_PTA_ADDRESS_HIGH 0x00000390
+
+#define VIVS_MMUv2_PTA_CONTROL 0x00000394
+#define VIVS_MMUv2_PTA_CONTROL_ENABLE 0x00000001
+
+#define VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW 0x00000398
+
+#define VIVS_MMUv2_SEC_SAFE_ADDR_LOW 0x0000039c
+
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG 0x000003a0
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH__MASK 0x000000ff
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH__SHIFT 0
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(x) (((x) << VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH__SHIFT) & VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH__MASK)
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_UNK15 0x00008000
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH__MASK 0x00ff0000
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH__SHIFT 16
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(x) (((x) << VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH__SHIFT) & VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH__MASK)
+#define VIVS_MMUv2_SAFE_ADDRESS_CONFIG_UNK31 0x80000000
+
+#define VIVS_MMUv2_SEC_COMMAND_CONTROL 0x000003a4
+#define VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH__MASK 0x0000ffff
+#define VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH__SHIFT 0
+#define VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(x) (((x) << VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH__SHIFT) & VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH__MASK)
+#define VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE 0x00010000
+
+#define VIVS_MMUv2_AHB_CONTROL 0x000003a8
+#define VIVS_MMUv2_AHB_CONTROL_RESET 0x00000001
+#define VIVS_MMUv2_AHB_CONTROL_NONSEC_ACCESS 0x00000002
+
#define VIVS_MC 0x00000000
#define VIVS_MC_MMU_FE_PAGE_TABLE 0x00000400
@@ -340,13 +449,13 @@ Copyright (C) 2016
#define VIVS_MC_PROFILE_HI_READ 0x0000046c
#define VIVS_MC_PROFILE_CONFIG0 0x00000470
-#define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG0_FE__MASK 0x000000ff
#define VIVS_MC_PROFILE_CONFIG0_FE__SHIFT 0
#define VIVS_MC_PROFILE_CONFIG0_FE_RESET 0x0000000f
-#define VIVS_MC_PROFILE_CONFIG0_DE__MASK 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG0_DE__MASK 0x0000ff00
#define VIVS_MC_PROFILE_CONFIG0_DE__SHIFT 8
#define VIVS_MC_PROFILE_CONFIG0_DE_RESET 0x00000f00
-#define VIVS_MC_PROFILE_CONFIG0_PE__MASK 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG0_PE__MASK 0x00ff0000
#define VIVS_MC_PROFILE_CONFIG0_PE__SHIFT 16
#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_COLOR_PIPE 0x00000000
#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_KILLED_BY_DEPTH_PIPE 0x00010000
@@ -354,7 +463,7 @@ Copyright (C) 2016
#define VIVS_MC_PROFILE_CONFIG0_PE_PIXEL_COUNT_DRAWN_BY_DEPTH_PIPE 0x00030000
#define VIVS_MC_PROFILE_CONFIG0_PE_PIXELS_RENDERED_2D 0x000b0000
#define VIVS_MC_PROFILE_CONFIG0_PE_RESET 0x000f0000
-#define VIVS_MC_PROFILE_CONFIG0_SH__MASK 0x0f000000
+#define VIVS_MC_PROFILE_CONFIG0_SH__MASK 0xff000000
#define VIVS_MC_PROFILE_CONFIG0_SH__SHIFT 24
#define VIVS_MC_PROFILE_CONFIG0_SH_SHADER_CYCLES 0x04000000
#define VIVS_MC_PROFILE_CONFIG0_SH_PS_INST_COUNTER 0x07000000
@@ -368,7 +477,7 @@ Copyright (C) 2016
#define VIVS_MC_PROFILE_CONFIG0_SH_RESET 0x0f000000
#define VIVS_MC_PROFILE_CONFIG1 0x00000474
-#define VIVS_MC_PROFILE_CONFIG1_PA__MASK 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG1_PA__MASK 0x000000ff
#define VIVS_MC_PROFILE_CONFIG1_PA__SHIFT 0
#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_VTX_COUNTER 0x00000003
#define VIVS_MC_PROFILE_CONFIG1_PA_INPUT_PRIM_COUNTER 0x00000004
@@ -377,12 +486,12 @@ Copyright (C) 2016
#define VIVS_MC_PROFILE_CONFIG1_PA_TRIVIAL_REJECTED_COUNTER 0x00000007
#define VIVS_MC_PROFILE_CONFIG1_PA_CULLED_COUNTER 0x00000008
#define VIVS_MC_PROFILE_CONFIG1_PA_RESET 0x0000000f
-#define VIVS_MC_PROFILE_CONFIG1_SE__MASK 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG1_SE__MASK 0x0000ff00
#define VIVS_MC_PROFILE_CONFIG1_SE__SHIFT 8
#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_TRIANGLE_COUNT 0x00000000
#define VIVS_MC_PROFILE_CONFIG1_SE_CULLED_LINES_COUNT 0x00000100
#define VIVS_MC_PROFILE_CONFIG1_SE_RESET 0x00000f00
-#define VIVS_MC_PROFILE_CONFIG1_RA__MASK 0x000f0000
+#define VIVS_MC_PROFILE_CONFIG1_RA__MASK 0x00ff0000
#define VIVS_MC_PROFILE_CONFIG1_RA__SHIFT 16
#define VIVS_MC_PROFILE_CONFIG1_RA_VALID_PIXEL_COUNT 0x00000000
#define VIVS_MC_PROFILE_CONFIG1_RA_TOTAL_QUAD_COUNT 0x00010000
@@ -392,7 +501,7 @@ Copyright (C) 2016
#define VIVS_MC_PROFILE_CONFIG1_RA_PREFETCH_CACHE_MISS_COUNTER 0x000a0000
#define VIVS_MC_PROFILE_CONFIG1_RA_CULLED_QUAD_COUNT 0x000b0000
#define VIVS_MC_PROFILE_CONFIG1_RA_RESET 0x000f0000
-#define VIVS_MC_PROFILE_CONFIG1_TX__MASK 0x0f000000
+#define VIVS_MC_PROFILE_CONFIG1_TX__MASK 0xff000000
#define VIVS_MC_PROFILE_CONFIG1_TX__SHIFT 24
#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_BILINEAR_REQUESTS 0x00000000
#define VIVS_MC_PROFILE_CONFIG1_TX_TOTAL_TRILINEAR_REQUESTS 0x01000000
@@ -407,18 +516,21 @@ Copyright (C) 2016
#define VIVS_MC_PROFILE_CONFIG1_TX_RESET 0x0f000000
#define VIVS_MC_PROFILE_CONFIG2 0x00000478
-#define VIVS_MC_PROFILE_CONFIG2_MC__MASK 0x0000000f
+#define VIVS_MC_PROFILE_CONFIG2_MC__MASK 0x000000ff
#define VIVS_MC_PROFILE_CONFIG2_MC__SHIFT 0
#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_PIPELINE 0x00000001
#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_READ_REQ_8B_FROM_IP 0x00000002
#define VIVS_MC_PROFILE_CONFIG2_MC_TOTAL_WRITE_REQ_8B_FROM_PIPELINE 0x00000003
#define VIVS_MC_PROFILE_CONFIG2_MC_RESET 0x0000000f
-#define VIVS_MC_PROFILE_CONFIG2_HI__MASK 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG2_HI__MASK 0x0000ff00
#define VIVS_MC_PROFILE_CONFIG2_HI__SHIFT 8
#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_READ_REQUEST_STALLED 0x00000000
#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_REQUEST_STALLED 0x00000100
#define VIVS_MC_PROFILE_CONFIG2_HI_AXI_CYCLES_WRITE_DATA_STALLED 0x00000200
#define VIVS_MC_PROFILE_CONFIG2_HI_RESET 0x00000f00
+#define VIVS_MC_PROFILE_CONFIG2_BLT__MASK 0xff000000
+#define VIVS_MC_PROFILE_CONFIG2_BLT__SHIFT 24
+#define VIVS_MC_PROFILE_CONFIG2_BLT_UNK0 0x00000000
#define VIVS_MC_PROFILE_CONFIG3 0x0000047c
@@ -432,7 +544,13 @@ Copyright (C) 2016
#define VIVS_MC_START_COMPOSITION 0x00000554
-#define VIVS_MC_128B_MERGE 0x00000558
+#define VIVS_MC_FLAGS 0x00000558
+#define VIVS_MC_FLAGS_128B_MERGE 0x00000001
+#define VIVS_MC_FLAGS_TPCV11_COMPRESSION 0x08000000
+
+#define VIVS_MC_L2_CACHE_CONFIG 0x0000055c
+
+#define VIVS_MC_PROFILE_L2_READ 0x00000564
#endif /* STATE_HI_XML */
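
As a standalone illustration of how these generated __MASK/__SHIFT/field helpers are combined when composing and decoding a register word (the chosen DVFS period value is arbitrary; plain C, compiled outside the kernel):

/*
 * Illustrative only: build a VIVS_PM_PULSE_EATER value from the field
 * macros above, then extract the field back out.
 */
#include <stdint.h>
#include <stdio.h>

#define VIVS_PM_PULSE_EATER_DVFS_PERIOD__MASK   0x0000ff00
#define VIVS_PM_PULSE_EATER_DVFS_PERIOD__SHIFT  8
#define VIVS_PM_PULSE_EATER_DVFS_PERIOD(x) \
        (((x) << VIVS_PM_PULSE_EATER_DVFS_PERIOD__SHIFT) & \
         VIVS_PM_PULSE_EATER_DVFS_PERIOD__MASK)
#define VIVS_PM_PULSE_EATER_INTERNAL_DFS        0x00040000

int main(void)
{
        uint32_t val = VIVS_PM_PULSE_EATER_INTERNAL_DFS |
                       VIVS_PM_PULSE_EATER_DVFS_PERIOD(0x20);

        /* Reading a field back is the inverse operation. */
        uint32_t period = (val & VIVS_PM_PULSE_EATER_DVFS_PERIOD__MASK) >>
                          VIVS_PM_PULSE_EATER_DVFS_PERIOD__SHIFT;

        printf("reg value 0x%08x, period 0x%02x\n", val, period);
        return 0;
}
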
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 0a100a288e6d..d29281231507 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2046,7 +2046,7 @@ static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
}
if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
- gvt_err("vgpu ppgtt mm is not fully destoried\n");
+ gvt_err("vgpu ppgtt mm is not fully destroyed\n");
if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
gvt_err("Why we still has spt not freed?\n");
@@ -2291,6 +2291,28 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt)
}
/**
+ * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
+ * @vgpu: a vGPU
+ *
+ * This function is called to invalidate all PPGTT instances of a vGPU.
+ *
+ */
+void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_mm *mm;
+
+ list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
+ if (mm->type == INTEL_GVT_MM_PPGTT) {
+ list_del_init(&mm->ppgtt_mm.lru_list);
+ if (mm->ppgtt_mm.shadowed)
+ invalidate_ppgtt_mm(mm);
+ }
+ }
+}
+
+/**
* intel_vgpu_reset_ggtt - reset the GGTT entry
* @vgpu: a vGPU
*
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index e831507e17c3..a8b369cd352b 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -194,6 +194,7 @@ struct intel_vgpu_gtt {
extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
+void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 112f2ec7c25f..8c5d5d005854 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1767,6 +1767,10 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(CURBASE(PIPE_B), D_ALL);
MMIO_D(CURBASE(PIPE_C), D_ALL);
+ MMIO_D(CUR_FBC_CTL(PIPE_A), D_ALL);
+ MMIO_D(CUR_FBC_CTL(PIPE_B), D_ALL);
+ MMIO_D(CUR_FBC_CTL(PIPE_C), D_ALL);
+
MMIO_D(_MMIO(0x700ac), D_ALL);
MMIO_D(_MMIO(0x710ac), D_ALL);
MMIO_D(_MMIO(0x720ac), D_ALL);
@@ -2228,6 +2232,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(HSW_AUD_CFG(PIPE_A), D_ALL);
MMIO_D(HSW_AUD_PIN_ELD_CP_VLD, D_ALL);
+ MMIO_D(HSW_AUD_MISC_CTRL(PIPE_A), D_ALL);
MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_A), D_ALL, NULL, NULL);
MMIO_DH(_MMIO(_TRANS_DDI_FUNC_CTL_B), D_ALL, NULL, NULL);
@@ -2559,6 +2564,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(WM_MISC, D_BDW);
MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW);
+ MMIO_D(_MMIO(0x6671c), D_BDW_PLUS);
MMIO_D(_MMIO(0x66c00), D_BDW_PLUS);
MMIO_D(_MMIO(0x66c04), D_BDW_PLUS);
@@ -2787,6 +2793,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
+ MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
MMIO_D(_MMIO(0x8f074), D_SKL | D_KBL);
@@ -2801,7 +2808,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+ MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
+ MMIO_D(RC6_LOCATION, D_SKL_PLUS);
MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 8a428678e4b5..520fe3d0a882 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -184,7 +184,7 @@ static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
return NULL;
}
-static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
+static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
dma_addr_t dma_addr)
{
struct gvt_dma *new, *itr;
@@ -192,7 +192,7 @@ static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
if (!new)
- return;
+ return -ENOMEM;
new->vgpu = vgpu;
new->gfn = gfn;
@@ -229,6 +229,7 @@ static void __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);
vgpu->vdev.nr_cache_entries++;
+ return 0;
}
static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
@@ -1586,11 +1587,12 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
entry = __gvt_cache_find_gfn(info->vgpu, gfn);
if (!entry) {
ret = gvt_dma_map_page(vgpu, gfn, dma_addr);
- if (ret) {
- mutex_unlock(&info->vgpu->vdev.cache_lock);
- return ret;
- }
- __gvt_cache_add(info->vgpu, gfn, *dma_addr);
+ if (ret)
+ goto err_unlock;
+
+ ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr);
+ if (ret)
+ goto err_unmap;
} else {
kref_get(&entry->ref);
*dma_addr = entry->dma_addr;
@@ -1598,6 +1600,12 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
mutex_unlock(&info->vgpu->vdev.cache_lock);
return 0;
+
+err_unmap:
+ gvt_dma_unmap_page(vgpu, gfn, *dma_addr);
+err_unlock:
+ mutex_unlock(&info->vgpu->vdev.cache_lock);
+ return ret;
}
static void __gvt_dma_release(struct kref *ref)
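
The reworked error path above maps the page first and only then inserts it into the cache, unwinding in reverse order through the err_unmap/err_unlock labels. A small userspace sketch of the same unwind shape, with stand-in helpers (a pthread mutex in place of cache_lock; the map/cache functions are placeholders, not the GVT-g ones):

#include <pthread.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

/* Placeholders for gvt_dma_map_page(), __gvt_cache_add(), gvt_dma_unmap_page(). */
static int map_page(unsigned long gfn, unsigned long *dma_addr)
{
        *dma_addr = gfn << 12;
        return 0;
}
static void unmap_page(unsigned long gfn, unsigned long dma_addr) { (void)gfn; (void)dma_addr; }
static int cache_add(unsigned long gfn, unsigned long dma_addr) { (void)gfn; (void)dma_addr; return 0; }

int dma_map_guest_page(unsigned long gfn, unsigned long *dma_addr)
{
        int ret;

        pthread_mutex_lock(&cache_lock);

        ret = map_page(gfn, dma_addr);
        if (ret)
                goto err_unlock;        /* nothing to undo yet */

        ret = cache_add(gfn, *dma_addr);
        if (ret)
                goto err_unmap;         /* undo the mapping we just created */

        pthread_mutex_unlock(&cache_lock);
        return 0;

err_unmap:
        unmap_page(gfn, *dma_addr);
err_unlock:
        pthread_mutex_unlock(&cache_lock);
        return ret;
}
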
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 9b92b4e25a20..a55b4975c154 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -52,6 +52,29 @@ static void set_context_pdp_root_pointer(
pdp_pair[i].val = pdp[7 - i];
}
+static void update_shadow_pdps(struct intel_vgpu_workload *workload)
+{
+ struct intel_vgpu *vgpu = workload->vgpu;
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
+ struct drm_i915_gem_object *ctx_obj =
+ shadow_ctx->engine[ring_id].state->obj;
+ struct execlist_ring_context *shadow_ring_context;
+ struct page *page;
+
+ if (WARN_ON(!workload->shadow_mm))
+ return;
+
+ if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
+ return;
+
+ page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+ shadow_ring_context = kmap(page);
+ set_context_pdp_root_pointer(shadow_ring_context,
+ (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
+ kunmap(page);
+}
+
static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
@@ -101,8 +124,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
#define COPY_REG(name) \
intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+ RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
+#define COPY_REG_MASKED(name) {\
+ intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
+ + RING_CTX_OFF(name.val),\
+ &shadow_ring_context->name.val, 4);\
+ shadow_ring_context->name.val |= 0xffff << 16;\
+ }
- COPY_REG(ctx_ctrl);
+ COPY_REG_MASKED(ctx_ctrl);
COPY_REG(ctx_timestamp);
if (ring_id == RCS) {
@@ -111,9 +140,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG(rcs_indirect_ctx_offset);
}
#undef COPY_REG
-
- set_context_pdp_root_pointer(shadow_ring_context,
- (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
+#undef COPY_REG_MASKED
intel_gvt_hypervisor_read_gpa(vgpu,
workload->ring_context_gpa +
@@ -509,6 +536,8 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
return ret;
}
+ update_shadow_pdps(workload);
+
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
gvt_vgpu_err("fail to vgpu sync oos pages\n");
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 41f76e86aa1f..2e0a02a80fe4 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -522,6 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
/* full GPU reset or device model level reset */
if (engine_mask == ALL_ENGINES || dmlr) {
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
+ intel_vgpu_invalidate_ppgtt(vgpu);
/*fence will not be reset during virtual reset */
if (dmlr) {
intel_vgpu_reset_gtt(vgpu);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a5bd07338b46..0359d6f870b4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -471,10 +471,11 @@ static void __fence_set_priority(struct dma_fence *fence, int prio)
rq = to_request(fence);
engine = rq->engine;
- if (!engine->schedule)
- return;
- engine->schedule(rq, prio);
+ rcu_read_lock();
+ if (engine->schedule)
+ engine->schedule(rq, prio);
+ rcu_read_unlock();
}
static void fence_set_priority(struct dma_fence *fence, int prio)
@@ -2939,8 +2940,16 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
* calling engine->init_hw() and also writing the ELSP.
* Turning off the execlists->tasklet until the reset is over
* prevents the race.
+ *
+ * Note that this needs to be a single atomic operation on the
+ * tasklet (flush existing tasks, prevent new tasks) to prevent
+ * a race between reset and set-wedged. It is not, so we do the best
+ * we can atm and make sure we don't lock the machine up in the more
+ * common case of recursively being called from set-wedged from inside
+ * i915_reset.
*/
- tasklet_kill(&engine->execlists.tasklet);
+ if (!atomic_read(&engine->execlists.tasklet.count))
+ tasklet_kill(&engine->execlists.tasklet);
tasklet_disable(&engine->execlists.tasklet);
/*
@@ -3214,8 +3223,11 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
*/
for_each_engine(engine, i915, id) {
i915_gem_reset_prepare_engine(engine);
+
engine->submit_request = nop_submit_request;
+ engine->schedule = NULL;
}
+ i915->caps.scheduler = 0;
/*
* Make sure no one is running the old callback before we proceed with
@@ -3233,11 +3245,8 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* start to complete all requests.
*/
engine->submit_request = nop_complete_submit_request;
- engine->schedule = NULL;
}
- i915->caps.scheduler = 0;
-
/*
* Make sure no request can slip through without getting completed by
* either this call here to intel_engine_init_global_seqno, or the one
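
The i915_gem.c hunks above (and the i915_request.c hunk further below) wrap every call through engine->schedule in rcu_read_lock()/rcu_read_unlock() so that set-wedged can clear the callback without racing a caller. A simplified, generic sketch of that pattern in kernel-style C follows; it is not the i915 code itself, and the synchronize_rcu() on the writer side is an assumption about how teardown would wait for in-flight readers:

#include <linux/rcupdate.h>

struct sched_hook {
        void (*schedule)(void *data, int prio); /* may become NULL at any time */
};

static void run_hook(struct sched_hook *h, void *data, int prio)
{
        rcu_read_lock();
        if (h->schedule)
                h->schedule(data, prio); /* stays callable until we drop the lock */
        rcu_read_unlock();
}

static void disable_hook(struct sched_hook *h)
{
        h->schedule = NULL;
        synchronize_rcu();      /* every run_hook() that saw the old value has finished */
        /* now safe to tear down whatever the callback depended on */
}
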
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 964467b03e4d..d8feb9053e0c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -433,7 +433,7 @@ static u64 __get_rc6(struct drm_i915_private *i915)
return val;
}
-static u64 get_rc6(struct drm_i915_private *i915, bool locked)
+static u64 get_rc6(struct drm_i915_private *i915)
{
#if IS_ENABLED(CONFIG_PM)
unsigned long flags;
@@ -449,8 +449,7 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
* previously.
*/
- if (!locked)
- spin_lock_irqsave(&i915->pmu.lock, flags);
+ spin_lock_irqsave(&i915->pmu.lock, flags);
if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
@@ -459,12 +458,10 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
}
- if (!locked)
- spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
} else {
struct pci_dev *pdev = i915->drm.pdev;
struct device *kdev = &pdev->dev;
- unsigned long flags2;
/*
* We are runtime suspended.
@@ -473,10 +470,8 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
* on top of the last known real value, as the approximated RC6
* counter value.
*/
- if (!locked)
- spin_lock_irqsave(&i915->pmu.lock, flags);
-
- spin_lock_irqsave(&kdev->power.lock, flags2);
+ spin_lock_irqsave(&i915->pmu.lock, flags);
+ spin_lock(&kdev->power.lock);
if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
i915->pmu.suspended_jiffies_last =
@@ -486,14 +481,13 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
i915->pmu.suspended_jiffies_last;
val += jiffies - kdev->power.accounting_timestamp;
- spin_unlock_irqrestore(&kdev->power.lock, flags2);
+ spin_unlock(&kdev->power.lock);
val = jiffies_to_nsecs(val);
val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
- if (!locked)
- spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
return val;
@@ -502,7 +496,7 @@ static u64 get_rc6(struct drm_i915_private *i915, bool locked)
#endif
}
-static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
+static u64 __i915_pmu_event_read(struct perf_event *event)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
@@ -540,7 +534,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
val = count_interrupts(i915);
break;
case I915_PMU_RC6_RESIDENCY:
- val = get_rc6(i915, locked);
+ val = get_rc6(i915);
break;
}
}
@@ -555,7 +549,7 @@ static void i915_pmu_event_read(struct perf_event *event)
again:
prev = local64_read(&hwc->prev_count);
- new = __i915_pmu_event_read(event, false);
+ new = __i915_pmu_event_read(event);
if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
goto again;
@@ -605,14 +599,14 @@ static void i915_pmu_enable(struct perf_event *event)
engine->pmu.enable_count[sample]++;
}
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+
/*
* Store the current counter value so we can report the correct delta
* for all listeners. Even when the event was already enabled and has
* an existing non-zero value.
*/
- local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));
-
- spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
}
static void i915_pmu_disable(struct perf_event *event)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index d437beac3969..282f57630cc1 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1081,8 +1081,10 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
* decide whether to preempt the entire chain so that it is ready to
* run at the earliest possible convenience.
*/
+ rcu_read_lock();
if (engine->schedule)
engine->schedule(request, request->ctx->priority);
+ rcu_read_unlock();
local_bh_disable();
i915_sw_fence_commit(&request->submit);
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 89ab0f70aa22..c6a7beabd58d 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -39,7 +39,7 @@
#define CTM_COEFF_NEGATIVE(coeff) (((coeff) & CTM_COEFF_SIGN) != 0)
#define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1))
-#define LEGACY_LUT_LENGTH (sizeof(struct drm_color_lut) * 256)
+#define LEGACY_LUT_LENGTH 256
/* Post offset values for RGB->YCBCR conversion */
#define POSTOFF_RGB_TO_YUV_HI 0x800
@@ -79,7 +79,7 @@ static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state)
return !state->degamma_lut &&
!state->ctm &&
state->gamma_lut &&
- state->gamma_lut->length == LEGACY_LUT_LENGTH;
+ drm_color_lut_size(state->gamma_lut) == LEGACY_LUT_LENGTH;
}
/*
@@ -153,8 +153,7 @@ static void ilk_load_csc_matrix(struct drm_crtc_state *crtc_state)
ilk_load_ycbcr_conversion_matrix(intel_crtc);
return;
} else if (crtc_state->ctm) {
- struct drm_color_ctm *ctm =
- (struct drm_color_ctm *)crtc_state->ctm->data;
+ struct drm_color_ctm *ctm = crtc_state->ctm->data;
const u64 *input;
u64 temp[9];
@@ -262,8 +261,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
uint32_t mode;
if (state->ctm) {
- struct drm_color_ctm *ctm =
- (struct drm_color_ctm *) state->ctm->data;
+ struct drm_color_ctm *ctm = state->ctm->data;
uint16_t coeffs[9] = { 0, };
int i;
@@ -330,7 +328,7 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
}
if (blob) {
- struct drm_color_lut *lut = (struct drm_color_lut *) blob->data;
+ struct drm_color_lut *lut = blob->data;
for (i = 0; i < 256; i++) {
uint32_t word =
(drm_color_lut_extract(lut[i].red, 8) << 16) |
@@ -400,8 +398,7 @@ static void bdw_load_degamma_lut(struct drm_crtc_state *state)
PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
if (state->degamma_lut) {
- struct drm_color_lut *lut =
- (struct drm_color_lut *) state->degamma_lut->data;
+ struct drm_color_lut *lut = state->degamma_lut->data;
for (i = 0; i < lut_size; i++) {
uint32_t word =
@@ -435,8 +432,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
offset);
if (state->gamma_lut) {
- struct drm_color_lut *lut =
- (struct drm_color_lut *) state->gamma_lut->data;
+ struct drm_color_lut *lut = state->gamma_lut->data;
for (i = 0; i < lut_size; i++) {
uint32_t word =
@@ -568,7 +564,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
}
if (state->degamma_lut) {
- lut = (struct drm_color_lut *) state->degamma_lut->data;
+ lut = state->degamma_lut->data;
lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.14 format. */
@@ -583,7 +579,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
}
if (state->gamma_lut) {
- lut = (struct drm_color_lut *) state->gamma_lut->data;
+ lut = state->gamma_lut->data;
lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
for (i = 0; i < lut_size; i++) {
/* Write LUT in U0.10 format. */
@@ -623,19 +619,17 @@ int intel_color_check(struct drm_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
size_t gamma_length, degamma_length;
- degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size *
- sizeof(struct drm_color_lut);
- gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size *
- sizeof(struct drm_color_lut);
+ degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
/*
* We allow both degamma & gamma luts at the right size or
* NULL.
*/
if ((!crtc_state->degamma_lut ||
- crtc_state->degamma_lut->length == degamma_length) &&
+ drm_color_lut_size(crtc_state->degamma_lut) == degamma_length) &&
(!crtc_state->gamma_lut ||
- crtc_state->gamma_lut->length == gamma_length))
+ drm_color_lut_size(crtc_state->gamma_lut) == gamma_length))
return 0;
/*
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ac8fc2a44ac6..dbcf1a0586f9 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -3080,9 +3080,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
- intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
- (DDI_BUF_PORT_REVERSAL |
- DDI_A_4_LANES);
+ if (INTEL_GEN(dev_priv) >= 11)
+ intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ DDI_BUF_PORT_REVERSAL;
+ else
+ intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ (DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 331084082545..3b48fd2561fe 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11059,24 +11059,17 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
static void __printf(3, 4)
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
- char *level;
- unsigned int category;
struct va_format vaf;
va_list args;
- if (adjust) {
- level = KERN_DEBUG;
- category = DRM_UT_KMS;
- } else {
- level = KERN_ERR;
- category = DRM_UT_NONE;
- }
-
va_start(args, format);
vaf.fmt = format;
vaf.va = &args;
- drm_printk(level, category, "mismatch in %s %pV", name, &vaf);
+ if (adjust)
+ drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
+ else
+ drm_err("mismatch in %s %pV", name, &vaf);
va_end(args);
}
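
The rewritten pipe_config_err() above relies on the kernel's %pV extension: a printf-style wrapper packs its own varargs into a struct va_format and hands them to drm_dbg()/drm_err() as a single argument. A minimal, generic sketch of that idiom (hypothetical pr_*-based logger, not the i915 function):

#include <linux/kernel.h>
#include <linux/printk.h>

static void __printf(2, 3) my_log(bool debug, const char *format, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, format);
        vaf.fmt = format;
        vaf.va = &args;

        /* The caller's whole format string expands where %pV appears. */
        if (debug)
                pr_debug("mismatch: %pV", &vaf);
        else
                pr_err("mismatch: %pV", &vaf);

        va_end(args);
}
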
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index f9ad0e960263..32b1a6cdecfc 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -189,40 +189,55 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(regs))
- return PTR_ERR(regs);
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ goto free_drm;
+ }
priv->io_base = regs;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
+ if (!res)
+ return -EINVAL;
/* Simply ioremap since it may be a shared register zone */
regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!regs)
- return -EADDRNOTAVAIL;
+ if (!regs) {
+ ret = -EADDRNOTAVAIL;
+ goto free_drm;
+ }
priv->hhi = devm_regmap_init_mmio(dev, regs,
&meson_regmap_config);
if (IS_ERR(priv->hhi)) {
dev_err(&pdev->dev, "Couldn't create the HHI regmap\n");
- return PTR_ERR(priv->hhi);
+ ret = PTR_ERR(priv->hhi);
+ goto free_drm;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
+ if (!res)
+ return -EINVAL;
/* Simply ioremap since it may be a shared register zone */
regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!regs)
- return -EADDRNOTAVAIL;
+ if (!regs) {
+ ret = -EADDRNOTAVAIL;
+ goto free_drm;
+ }
priv->dmc = devm_regmap_init_mmio(dev, regs,
&meson_regmap_config);
if (IS_ERR(priv->dmc)) {
dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
- return PTR_ERR(priv->dmc);
+ ret = PTR_ERR(priv->dmc);
+ goto free_drm;
}
priv->vsync_irq = platform_get_irq(pdev, 0);
- drm_vblank_init(drm, 1);
+ ret = drm_vblank_init(drm, 1);
+ if (ret)
+ goto free_drm;
+
drm_mode_config_init(drm);
drm->mode_config.max_width = 3840;
drm->mode_config.max_height = 2160;
@@ -281,7 +296,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
return 0;
free_drm:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -300,7 +315,7 @@ static void meson_drv_unbind(struct device *dev)
drm_kms_helper_poll_fini(drm);
drm_fbdev_cma_fini(priv->fbdev);
drm_mode_config_cleanup(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index d49af17310c9..a393095aac1a 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -538,7 +538,6 @@ static irqreturn_t dw_hdmi_top_thread_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* TOFIX Enable support for non-vic modes */
static enum drm_mode_status
dw_hdmi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
@@ -555,12 +554,12 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
mode->vdisplay, mode->vsync_start,
mode->vsync_end, mode->vtotal, mode->type, mode->flags);
- /* For now, only accept VIC modes */
- if (!vic)
- return MODE_BAD;
-
- /* For now, filter by supported VIC modes */
- if (!meson_venc_hdmi_supported_vic(vic))
+ /* Check against non-VIC supported modes */
+ if (!vic) {
+ if (!meson_venc_hdmi_supported_mode(mode))
+ return MODE_BAD;
+ /* Check against supported VIC modes */
+ } else if (!meson_venc_hdmi_supported_vic(vic))
return MODE_BAD;
vclk_freq = mode->clock;
@@ -586,9 +585,14 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
/* Finally filter by configurable vclk frequencies */
switch (vclk_freq) {
+ case 25175:
+ case 40000:
case 54000:
+ case 65000:
case 74250:
+ case 108000:
case 148500:
+ case 162000:
case 297000:
case 594000:
return MODE_OK;
@@ -653,10 +657,6 @@ static void meson_venc_hdmi_encoder_mode_set(struct drm_encoder *encoder,
DRM_DEBUG_DRIVER("%d:\"%s\" vic %d\n",
mode->base.id, mode->name, vic);
- /* Should have been filtered */
- if (!vic)
- return;
-
/* VENC + VENC-DVI Mode setup */
meson_venc_hdmi_mode_set(priv, vic, mode);
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 47677047e42d..f0511220317f 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -328,14 +328,24 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
#define MESON_VCLK_HDMI_DDR_54000 2
/* 2970 /4 /1 /1 /5 /1 => /1 /2 */
#define MESON_VCLK_HDMI_DDR_148500 3
+/* 4028 /4 /4 /1 /5 /2 => /1 /1 */
+#define MESON_VCLK_HDMI_25175 4
+/* 3200 /4 /2 /1 /5 /2 => /1 /1 */
+#define MESON_VCLK_HDMI_40000 5
+/* 5200 /4 /2 /1 /5 /2 => /1 /1 */
+#define MESON_VCLK_HDMI_65000 6
/* 2970 /2 /2 /2 /5 /1 => /1 /1 */
-#define MESON_VCLK_HDMI_74250 4
+#define MESON_VCLK_HDMI_74250 7
+/* 4320 /4 /1 /1 /5 /2 => /1 /1 */
+#define MESON_VCLK_HDMI_108000 8
/* 2970 /1 /2 /2 /5 /1 => /1 /1 */
-#define MESON_VCLK_HDMI_148500 5
+#define MESON_VCLK_HDMI_148500 9
+/* 3240 /2 /1 /1 /5 /2 => /1 /1 */
+#define MESON_VCLK_HDMI_162000 10
/* 2970 /1 /1 /1 /5 /2 => /1 /1 */
-#define MESON_VCLK_HDMI_297000 6
+#define MESON_VCLK_HDMI_297000 11
/* 5940 /1 /1 /2 /5 /1 => /1 /1 */
-#define MESON_VCLK_HDMI_594000 7
+#define MESON_VCLK_HDMI_594000 12
struct meson_vclk_params {
unsigned int pll_base_freq;
@@ -401,6 +411,46 @@ struct meson_vclk_params {
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
+ [MESON_VCLK_HDMI_25175] = {
+ .pll_base_freq = 4028000,
+ .pll_od1 = 4,
+ .pll_od2 = 4,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 2,
+ },
+ [MESON_VCLK_HDMI_40000] = {
+ .pll_base_freq = 3200000,
+ .pll_od1 = 4,
+ .pll_od2 = 2,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 2,
+ },
+ [MESON_VCLK_HDMI_65000] = {
+ .pll_base_freq = 5200000,
+ .pll_od1 = 4,
+ .pll_od2 = 2,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 2,
+ },
+ [MESON_VCLK_HDMI_108000] = {
+ .pll_base_freq = 4320000,
+ .pll_od1 = 4,
+ .pll_od2 = 1,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 2,
+ },
+ [MESON_VCLK_HDMI_162000] = {
+ .pll_base_freq = 3240000,
+ .pll_od1 = 2,
+ .pll_od2 = 1,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 2,
+ },
};
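
The /od1 /od2 /od3 /vid_pll_div /vclk_div comments above spell out how each pixel clock falls out of the PLL base frequency. A quick arithmetic check of the new 25.175 MHz entry, with all frequencies in kHz:

#include <stdio.h>

int main(void)
{
        /* Values from the MESON_VCLK_HDMI_25175 entry above. */
        unsigned int pll_base_khz = 4028000;
        unsigned int od1 = 4, od2 = 4, od3 = 1, vid_pll_div = 5, vclk_div = 2;

        unsigned int pixel_khz =
                pll_base_khz / od1 / od2 / od3 / vid_pll_div / vclk_div;

        /* Prints 25175, i.e. 25.175 MHz, the 640x480@60 pixel clock. */
        printf("%u kHz\n", pixel_khz);
        return 0;
}
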
static inline unsigned int pll_od_to_reg(unsigned int od)
@@ -451,6 +501,90 @@ void meson_hdmi_pll_set(struct meson_drm *priv,
0xFFFF, 0x4e00);
break;
+ case 3200000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000242);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
+
+ /* unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ BIT(28), 0);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
+
+ /* div_frac */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0xFFFF, 0x4aab);
+ break;
+
+ case 3240000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000243);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
+
+ /* unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ BIT(28), 0);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
+
+ /* div_frac */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0xFFFF, 0x4800);
+ break;
+
+ case 3865000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000250);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
+
+ /* unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ BIT(28), 0);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
+
+ /* div_frac */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0xFFFF, 0x4855);
+ break;
+
+ case 4028000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x58000253);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x0d5c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
+
+ /* unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ BIT(28), 0);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
+
+ /* div_frac */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
+ 0xFFFF, 0x4eab);
+ break;
+
case 4320000:
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800025a);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
@@ -485,6 +619,23 @@ void meson_hdmi_pll_set(struct meson_drm *priv,
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
val, (val & HDMI_PLL_LOCK), 10, 0);
break;
+
+ case 5200000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x5800026c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x135c5091);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x801da72c);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
+
+ /* unreset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ BIT(28), 0);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL,
+ val, (val & HDMI_PLL_LOCK), 10, 0);
+ break;
};
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
@@ -498,6 +649,42 @@ void meson_hdmi_pll_set(struct meson_drm *priv,
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
break;
+ case 3200000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000285);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb155);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
+ break;
+
+ case 3240000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x40000287);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
+ break;
+
+ case 3865000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002a1);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb02b);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
+ break;
+
+ case 4028000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002a7);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb355);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
+ break;
+
case 4320000:
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002b4);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb000);
@@ -516,6 +703,15 @@ void meson_hdmi_pll_set(struct meson_drm *priv,
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
break;
+ case 5200000:
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x400002d8);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x800cb2ab);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x860f30c4);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0c8e0000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x001fa729);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x01a31500);
+ break;
+
};
/* Reset PLL */
@@ -590,15 +786,30 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
else
freq = MESON_VCLK_HDMI_DDR_54000;
break;
+ case 25175:
+ freq = MESON_VCLK_HDMI_25175;
+ break;
+ case 40000:
+ freq = MESON_VCLK_HDMI_40000;
+ break;
+ case 65000:
+ freq = MESON_VCLK_HDMI_65000;
+ break;
case 74250:
freq = MESON_VCLK_HDMI_74250;
break;
+ case 108000:
+ freq = MESON_VCLK_HDMI_108000;
+ break;
case 148500:
if (dac_freq != 148500)
freq = MESON_VCLK_HDMI_DDR_148500;
else
freq = MESON_VCLK_HDMI_148500;
break;
+ case 162000:
+ freq = MESON_VCLK_HDMI_162000;
+ break;
case 297000:
freq = MESON_VCLK_HDMI_297000;
break;
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 9509017dbded..6e2701389801 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -697,6 +697,314 @@ union meson_hdmi_venc_mode meson_hdmi_encp_mode_1080p60 = {
},
};
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_640x480_60 = {
+ .encp = {
+ .dvi_settings = 0x21,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ /* video_prog_mode */
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ /* video_ofld_voav_ofst */
+ /* yfp1_htime */
+ /* yfp2_htime */
+ .max_pxcnt = 0x31f,
+ /* hspuls_begin */
+ /* hspuls_end */
+ /* hspuls_switch */
+ /* vspuls_begin */
+ /* vspuls_end */
+ /* vspuls_bline */
+ /* vspuls_eline */
+ .havon_begin = 0x90,
+ .havon_end = 0x30f,
+ .vavon_bline = 0x23,
+ .vavon_eline = 0x202,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 0,
+ .hso_end = 0x60,
+ .vso_begin = 0x1e,
+ .vso_end = 0x32,
+ .vso_bline = 0,
+ .vso_eline = 2,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 0x20c,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_800x600_60 = {
+ .encp = {
+ .dvi_settings = 0x21,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ /* video_prog_mode */
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ /* video_ofld_voav_ofst */
+ /* yfp1_htime */
+ /* yfp2_htime */
+ .max_pxcnt = 0x41f,
+ /* hspuls_begin */
+ /* hspuls_end */
+ /* hspuls_switch */
+ /* vspuls_begin */
+ /* vspuls_end */
+ /* vspuls_bline */
+ /* vspuls_eline */
+ .havon_begin = 0xD8,
+ .havon_end = 0x3f7,
+ .vavon_bline = 0x1b,
+ .vavon_eline = 0x272,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 0,
+ .hso_end = 0x80,
+ .vso_begin = 0x1e,
+ .vso_end = 0x32,
+ .vso_bline = 0,
+ .vso_eline = 4,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 0x273,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_1024x768_60 = {
+ .encp = {
+ .dvi_settings = 0x21,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ /* video_prog_mode */
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ /* video_ofld_voav_ofst */
+ /* yfp1_htime */
+ /* yfp2_htime */
+ .max_pxcnt = 1343,
+ /* hspuls_begin */
+ /* hspuls_end */
+ /* hspuls_switch */
+ /* vspuls_begin */
+ /* vspuls_end */
+ /* vspuls_bline */
+ /* vspuls_eline */
+ .havon_begin = 296,
+ .havon_end = 1319,
+ .vavon_bline = 35,
+ .vavon_eline = 802,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 0,
+ .hso_end = 136,
+ .vso_begin = 30,
+ .vso_end = 50,
+ .vso_bline = 0,
+ .vso_eline = 6,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 805,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_1152x864_75 = {
+ .encp = {
+ .dvi_settings = 0x21,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ /* video_prog_mode */
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ /* video_ofld_voav_ofst */
+ /* yfp1_htime */
+ /* yfp2_htime */
+ .max_pxcnt = 0x63f,
+ /* hspuls_begin */
+ /* hspuls_end */
+ /* hspuls_switch */
+ /* vspuls_begin */
+ /* vspuls_end */
+ /* vspuls_bline */
+ /* vspuls_eline */
+ .havon_begin = 0x180,
+ .havon_end = 0x5ff,
+ .vavon_bline = 0x23,
+ .vavon_eline = 0x382,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 0,
+ .hso_end = 0x80,
+ .vso_begin = 0x1e,
+ .vso_end = 0x32,
+ .vso_bline = 0,
+ .vso_eline = 3,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 0x383,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_1280x1024_60 = {
+ .encp = {
+ .dvi_settings = 0x21,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ /* video_prog_mode */
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ /* video_ofld_voav_ofst */
+ /* yfp1_htime */
+ /* yfp2_htime */
+ .max_pxcnt = 0x697,
+ /* hspuls_begin */
+ /* hspuls_end */
+ /* hspuls_switch */
+ /* vspuls_begin */
+ /* vspuls_end */
+ /* vspuls_bline */
+ /* vspuls_eline */
+ .havon_begin = 0x168,
+ .havon_end = 0x667,
+ .vavon_bline = 0x29,
+ .vavon_eline = 0x428,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 0,
+ .hso_end = 0x70,
+ .vso_begin = 0x1e,
+ .vso_end = 0x32,
+ .vso_bline = 0,
+ .vso_eline = 3,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 0x429,
+ },
+};
+
+union meson_hdmi_venc_mode meson_hdmi_encp_mode_1600x1200_60 = {
+ .encp = {
+ .dvi_settings = 0x21,
+ .video_mode = 0x4040,
+ .video_mode_adv = 0x18,
+ /* video_prog_mode */
+ /* video_sync_mode */
+ /* video_yc_dly */
+ /* video_rgb_ctrl */
+ /* video_filt_ctrl */
+ /* video_ofld_voav_ofst */
+ /* yfp1_htime */
+ /* yfp2_htime */
+ .max_pxcnt = 0x86f,
+ /* hspuls_begin */
+ /* hspuls_end */
+ /* hspuls_switch */
+ /* vspuls_begin */
+ /* vspuls_end */
+ /* vspuls_bline */
+ /* vspuls_eline */
+ .havon_begin = 0x1f0,
+ .havon_end = 0x82f,
+ .vavon_bline = 0x31,
+ .vavon_eline = 0x4e0,
+ /* eqpuls_begin */
+ /* eqpuls_end */
+ /* eqpuls_bline */
+ /* eqpuls_eline */
+ .hso_begin = 0,
+ .hso_end = 0xc0,
+ .vso_begin = 0x1e,
+ .vso_end = 0x32,
+ .vso_bline = 0,
+ .vso_eline = 3,
+ .vso_eline_present = true,
+ /* sy_val */
+ /* sy2_val */
+ .max_lncnt = 0x4e1,
+ },
+};
+
+struct meson_hdmi_venc_dmt_mode {
+ struct drm_display_mode drm_mode;
+ union meson_hdmi_venc_mode *mode;
+} meson_hdmi_venc_dmt_modes[] = {
+ /* 640x480@60Hz */
+ {
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ &meson_hdmi_encp_mode_640x480_60,
+ },
+ /* 800x600@60Hz */
+ {
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ &meson_hdmi_encp_mode_800x600_60,
+ },
+ /* 1024x768@60Hz */
+ {
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024,
+ 1048, 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ &meson_hdmi_encp_mode_1024x768_60,
+ },
+ /* 1152x864@75Hz */
+ {
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152,
+ 1216, 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ &meson_hdmi_encp_mode_1152x864_75,
+ },
+ /* 1280x1024@60Hz */
+ {
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280,
+ 1328, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ &meson_hdmi_encp_mode_1280x1024_60,
+ },
+ /* 1600x1200@60Hz */
+ {
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600,
+ 1664, 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ &meson_hdmi_encp_mode_1600x1200_60,
+ },
+ /* 1920x1080@60Hz */
+ {
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920,
+ 2008, 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ &meson_hdmi_encp_mode_1080p60
+ },
+ { }, /* sentinel */
+};
+
struct meson_hdmi_venc_vic_mode {
unsigned int vic;
union meson_hdmi_venc_mode *mode;
@@ -736,6 +1044,20 @@ static unsigned long modulo(unsigned long a, unsigned long b)
return a;
}
+bool meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode)
+{
+ struct meson_hdmi_venc_dmt_mode *vmode = meson_hdmi_venc_dmt_modes;
+
+ while (vmode->mode) {
+ if (drm_mode_equal(&vmode->drm_mode, mode))
+ return true;
+ vmode++;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_mode);
+
bool meson_venc_hdmi_supported_vic(int vic)
{
struct meson_hdmi_venc_vic_mode *vmode = meson_hdmi_venc_vic_modes;
@@ -750,6 +1072,20 @@ bool meson_venc_hdmi_supported_vic(int vic)
}
EXPORT_SYMBOL_GPL(meson_venc_hdmi_supported_vic);
+static union meson_hdmi_venc_mode
+*meson_venc_hdmi_get_dmt_vmode(const struct drm_display_mode *mode)
+{
+ struct meson_hdmi_venc_dmt_mode *vmode = meson_hdmi_venc_dmt_modes;
+
+ while (vmode->mode) {
+ if (drm_mode_equal(&vmode->drm_mode, mode))
+ return vmode->mode;
+ vmode++;
+ }
+
+ return NULL;
+}
+
static union meson_hdmi_venc_mode *meson_venc_hdmi_get_vic_vmode(int vic)
{
struct meson_hdmi_venc_vic_mode *vmode = meson_hdmi_venc_vic_modes;
@@ -811,10 +1147,13 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
unsigned int sof_lines;
unsigned int vsync_lines;
- vmode = meson_venc_hdmi_get_vic_vmode(vic);
+ if (meson_venc_hdmi_supported_vic(vic))
+ vmode = meson_venc_hdmi_get_vic_vmode(vic);
+ else
+ vmode = meson_venc_hdmi_get_dmt_vmode(mode);
if (!vmode) {
- dev_err(priv->dev, "%s: Fatal Error, unsupported vic %d\n",
- __func__, vic);
+ dev_err(priv->dev, "%s: Fatal Error, unsupported mode "
+ DRM_MODE_FMT "\n", __func__, DRM_MODE_ARG(mode));
return;
}
@@ -864,7 +1203,7 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
hsync_pixels_venc *= 2;
/* Disable VDACs */
- writel_bits_relaxed(0x1f, 0x1f,
+ writel_bits_relaxed(0xff, 0xff,
priv->io_base + _REG(VENC_VDAC_SETTING));
writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
index a1b96e898c14..7c18a36a0dd0 100644
--- a/drivers/gpu/drm/meson/meson_venc.h
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -58,6 +58,7 @@ struct meson_cvbs_enci_mode {
};
/* HDMI Clock parameters */
+bool meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode);
bool meson_venc_hdmi_supported_vic(int vic);
bool meson_venc_hdmi_venc_repeat(int vic);
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 99d39b2aefa6..38cbde971b48 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -28,6 +28,19 @@ config DRM_MSM_REGISTER_LOGGING
that can be parsed by envytools demsm tool. If enabled, register
logging can be switched on via msm.reglog=y module param.
+config DRM_MSM_GPU_SUDO
+ bool "Enable SUDO flag on submits"
+ depends on DRM_MSM && EXPERT
+ default n
+ help
+ Enable userspace that has CAP_SYS_RAWIO to submit GPU commands
+ that are run from RB instead of IB1. This essentially gives
+ userspace kernel level access, but is useful for firmware
+ debugging.
+
+ Only use this if you are a driver developer. This should *not*
+ be enabled for production kernels. If unsure, say N.
+
config DRM_MSM_HDMI_HDCP
bool "Enable HDMI HDCP support in MSM DRM driver"
depends on DRM_MSM && QCOM_SCM
@@ -81,3 +94,10 @@ config DRM_MSM_DSI_14NM_PHY
default y
help
Choose this option if DSI PHY on 8996 is used on the platform.
+
+config DRM_MSM_DSI_10NM_PHY
+ bool "Enable DSI 10nm PHY driver in MSM DRM (used by SDM845)"
+ depends on DRM_MSM_DSI
+ default y
+ help
+ Choose this option if DSI PHY on SDM845 is used on the platform.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 92b3844202d2..cd40c050b2d7 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -25,26 +25,26 @@ msm-y := \
edp/edp_connector.o \
edp/edp_ctrl.o \
edp/edp_phy.o \
- mdp/mdp_format.o \
- mdp/mdp_kms.o \
- mdp/mdp4/mdp4_crtc.o \
- mdp/mdp4/mdp4_dtv_encoder.o \
- mdp/mdp4/mdp4_lcdc_encoder.o \
- mdp/mdp4/mdp4_lvds_connector.o \
- mdp/mdp4/mdp4_irq.o \
- mdp/mdp4/mdp4_kms.o \
- mdp/mdp4/mdp4_plane.o \
- mdp/mdp5/mdp5_cfg.o \
- mdp/mdp5/mdp5_ctl.o \
- mdp/mdp5/mdp5_crtc.o \
- mdp/mdp5/mdp5_encoder.o \
- mdp/mdp5/mdp5_irq.o \
- mdp/mdp5/mdp5_mdss.o \
- mdp/mdp5/mdp5_kms.o \
- mdp/mdp5/mdp5_pipe.o \
- mdp/mdp5/mdp5_mixer.o \
- mdp/mdp5/mdp5_plane.o \
- mdp/mdp5/mdp5_smp.o \
+ disp/mdp_format.o \
+ disp/mdp_kms.o \
+ disp/mdp4/mdp4_crtc.o \
+ disp/mdp4/mdp4_dtv_encoder.o \
+ disp/mdp4/mdp4_lcdc_encoder.o \
+ disp/mdp4/mdp4_lvds_connector.o \
+ disp/mdp4/mdp4_irq.o \
+ disp/mdp4/mdp4_kms.o \
+ disp/mdp4/mdp4_plane.o \
+ disp/mdp5/mdp5_cfg.o \
+ disp/mdp5/mdp5_ctl.o \
+ disp/mdp5/mdp5_crtc.o \
+ disp/mdp5/mdp5_encoder.o \
+ disp/mdp5/mdp5_irq.o \
+ disp/mdp5/mdp5_mdss.o \
+ disp/mdp5/mdp5_kms.o \
+ disp/mdp5/mdp5_pipe.o \
+ disp/mdp5/mdp5_mixer.o \
+ disp/mdp5/mdp5_plane.o \
+ disp/mdp5/mdp5_smp.o \
msm_atomic.o \
msm_debugfs.o \
msm_drv.o \
@@ -62,31 +62,35 @@ msm-y := \
msm_ringbuffer.o \
msm_submitqueue.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
- mdp/mdp4/mdp4_dsi_encoder.o \
+ disp/mdp4/mdp4_dsi_encoder.o \
dsi/dsi_cfg.o \
dsi/dsi_host.o \
dsi/dsi_manager.o \
dsi/phy/dsi_phy.o \
- mdp/mdp5/mdp5_cmd_encoder.o
+ disp/mdp5/mdp5_cmd_encoder.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
+msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
msm-y += dsi/pll/dsi_pll.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
+msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o
endif
obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 4baef2738178..3ebbeb3a9b68 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -35,6 +35,7 @@
A3XX_INT0_CP_RB_INT | \
A3XX_INT0_CP_REG_PROTECT_FAULT | \
A3XX_INT0_CP_AHB_ERROR_HALT | \
+ A3XX_INT0_CACHE_FLUSH_TS | \
A3XX_INT0_UCHE_OOB_ACCESS)
extern bool hang_debug;
@@ -256,8 +257,8 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
*/
/* Load PM4: */
- ptr = (uint32_t *)(adreno_gpu->pm4->data);
- len = adreno_gpu->pm4->size / 4;
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
DBG("loading PM4 ucode version: %x", ptr[1]);
gpu_write(gpu, REG_AXXX_CP_DEBUG,
@@ -268,8 +269,8 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
/* Load PFP: */
- ptr = (uint32_t *)(adreno_gpu->pfp->data);
- len = adreno_gpu->pfp->size / 4;
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
DBG("loading PFP ucode version: %x", ptr[5]);
gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 8199a4b9f2fa..16d3d596638e 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -27,6 +27,7 @@
A4XX_INT0_CP_RB_INT | \
A4XX_INT0_CP_REG_PROTECT_FAULT | \
A4XX_INT0_CP_AHB_ERROR_HALT | \
+ A4XX_INT0_CACHE_FLUSH_TS | \
A4XX_INT0_UCHE_OOB_ACCESS)
extern bool hang_debug;
@@ -274,16 +275,16 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
return ret;
/* Load PM4: */
- ptr = (uint32_t *)(adreno_gpu->pm4->data);
- len = adreno_gpu->pm4->size / 4;
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PM4]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PM4]->size / 4;
DBG("loading PM4 ucode version: %u", ptr[0]);
gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
for (i = 1; i < len; i++)
gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);
/* Load PFP: */
- ptr = (uint32_t *)(adreno_gpu->pfp->data);
- len = adreno_gpu->pfp->size / 4;
+ ptr = (uint32_t *)(adreno_gpu->fw[ADRENO_FW_PFP]->data);
+ len = adreno_gpu->fw[ADRENO_FW_PFP]->size / 4;
DBG("loading PFP ucode version: %u", ptr[0]);
gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
new file mode 100644
index 000000000000..059ec7d394d0
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -0,0 +1,187 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
+#include "a5xx_gpu.h"
+
+static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "PFP state:\n");
+
+ for (i = 0; i < 36; i++) {
+ gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, i);
+ drm_printf(p, " %02x: %08x\n", i,
+ gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));
+ }
+
+ return 0;
+}
+
+static int me_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "ME state:\n");
+
+ for (i = 0; i < 29; i++) {
+ gpu_write(gpu, REG_A5XX_CP_ME_STAT_ADDR, i);
+ drm_printf(p, " %02x: %08x\n", i,
+ gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));
+ }
+
+ return 0;
+}
+
+static int meq_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "MEQ state:\n");
+ gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);
+
+ for (i = 0; i < 64; i++) {
+ drm_printf(p, " %02x: %08x\n", i,
+ gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
+ }
+
+ return 0;
+}
+
+static int roq_print(struct msm_gpu *gpu, struct drm_printer *p)
+{
+ int i;
+
+ drm_printf(p, "ROQ state:\n");
+ gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0);
+
+ for (i = 0; i < 512 / 4; i++) {
+ uint32_t val[4];
+ int j;
+ for (j = 0; j < 4; j++)
+ val[j] = gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA);
+ drm_printf(p, " %02x: %08x %08x %08x %08x\n", i,
+ val[0], val[1], val[2], val[3]);
+ }
+
+ return 0;
+}
+
+static int show(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ int (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
+ node->info_ent->data;
+
+ return show(priv->gpu, &p);
+}
+
+#define ENT(n) { .name = #n, .show = show, .data = n ##_print }
+static struct drm_info_list a5xx_debugfs_list[] = {
+ ENT(pfp),
+ ENT(me),
+ ENT(meq),
+ ENT(roq),
+};
+
+/* for debugfs files that can be written to, we can't use drm helper: */
+static int
+reset_set(void *data, u64 val)
+{
+ struct drm_device *dev = data;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EINVAL;
+
+ /* TODO do we care about trying to make sure the GPU is idle?
+ * Since this is just a debug feature limited to CAP_SYS_ADMIN,
+ * maybe it is fine to let the user keep both pieces if they
+ * try to reset an active GPU.
+ */
+
+ mutex_lock(&dev->struct_mutex);
+
+ release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]);
+ adreno_gpu->fw[ADRENO_FW_PM4] = NULL;
+
+ release_firmware(adreno_gpu->fw[ADRENO_FW_PFP]);
+ adreno_gpu->fw[ADRENO_FW_PFP] = NULL;
+
+ if (a5xx_gpu->pm4_bo) {
+ if (a5xx_gpu->pm4_iova)
+ msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ drm_gem_object_unreference(a5xx_gpu->pm4_bo);
+ a5xx_gpu->pm4_bo = NULL;
+ }
+
+ if (a5xx_gpu->pfp_bo) {
+ if (a5xx_gpu->pfp_iova)
+ msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ drm_gem_object_unreference(a5xx_gpu->pfp_bo);
+ a5xx_gpu->pfp_bo = NULL;
+ }
+
+ gpu->needs_hw_init = true;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ gpu->funcs->recover(gpu);
+
+ pm_runtime_put_sync(&gpu->pdev->dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
+
+
+int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
+{
+ struct drm_device *dev;
+ struct dentry *ent;
+ int ret;
+
+ if (!minor)
+ return 0;
+
+ dev = minor->dev;
+
+ ret = drm_debugfs_create_files(a5xx_debugfs_list,
+ ARRAY_SIZE(a5xx_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install a5xx_debugfs_list\n");
+ return ret;
+ }
+
+ ent = debugfs_create_file("reset", S_IWUGO,
+ minor->debugfs_root,
+ dev, &reset_fops);
+ if (!ent)
+ return -ENOMEM;
+
+ return 0;
+}
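
For readers unfamiliar with the two debugfs mechanisms the new file combines -- drm_debugfs_create_files() for the read-only state dumps and DEFINE_SIMPLE_ATTRIBUTE() for the writable reset node -- this is a minimal sketch of the second one; everything named demo_* is hypothetical and the snippet assumes an ordinary kernel module build.

    /* Minimal sketch of a write-only debugfs attribute, in the style of the
     * "reset" node above; demo_* names are made up for the example. */
    #include <linux/module.h>
    #include <linux/debugfs.h>
    #include <linux/fs.h>

    static struct dentry *demo_dir;

    static int demo_reset_set(void *data, u64 val)
    {
        pr_info("demo: reset requested with value %llu\n", val);
        return 0;
    }
    /* No read callback: the file is effectively write-only. */
    DEFINE_SIMPLE_ATTRIBUTE(demo_reset_fops, NULL, demo_reset_set, "%llu\n");

    static int __init demo_init(void)
    {
        demo_dir = debugfs_create_dir("demo", NULL);
        debugfs_create_file("reset", 0200, demo_dir, NULL, &demo_reset_fops);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        debugfs_remove_recursive(demo_dir);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

Writing a value to the node simply calls the set callback with the parsed u64, which is the hook the a5xx file uses to release and reload firmware.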
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 7e09d44e4a15..a4f68affc13b 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -140,6 +140,65 @@ static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}
+static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = submit->ring;
+ struct msm_gem_object *obj;
+ uint32_t *ptr, dwords;
+ unsigned int i;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (priv->lastctx == ctx)
+ break;
+ case MSM_SUBMIT_CMD_BUF:
+ /* copy commands into RB: */
+ obj = submit->bos[submit->cmd[i].idx].obj;
+ dwords = submit->cmd[i].size;
+
+ ptr = msm_gem_get_vaddr(&obj->base);
+
+ /* _get_vaddr() shouldn't fail at this point,
+ * since we've already mapped it once in
+ * submit_reloc()
+ */
+ if (WARN_ON(!ptr))
+ return;
+
+ for (i = 0; i < dwords; i++) {
+ /* normally the OUT_PKTn() would wait
+ * for space for the packet. But since
+ * we just OUT_RING() the whole thing,
+ * we need to call adreno_wait_ring()
+ * ourselves:
+ */
+ adreno_wait_ring(ring, 1);
+ OUT_RING(ring, ptr[i]);
+ }
+
+ msm_gem_put_vaddr(&obj->base);
+
+ break;
+ }
+ }
+
+ a5xx_flush(gpu, ring);
+ a5xx_preempt_trigger(gpu);
+
+ /* we might not necessarily have a cmd from userspace to
+ * trigger an event to know that submit has completed, so
+ * do this manually:
+ */
+ a5xx_idle(gpu, ring);
+ ring->memptrs->fence = submit->seqno;
+ msm_gpu_retire(gpu);
+}
+
static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx)
{
@@ -149,6 +208,12 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
+ if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
+ priv->lastctx = NULL;
+ a5xx_submit_in_rb(gpu, submit, ctx);
+ return;
+ }
+
OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
OUT_RING(ring, 0x02);
@@ -432,25 +497,6 @@ static int a5xx_preempt_start(struct msm_gpu *gpu)
return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
}
-
-static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
- const struct firmware *fw, u64 *iova)
-{
- struct drm_gem_object *bo;
- void *ptr;
-
- ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
- MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
-
- if (IS_ERR(ptr))
- return ERR_CAST(ptr);
-
- memcpy(ptr, &fw->data[4], fw->size - 4);
-
- msm_gem_put_vaddr(bo);
- return bo;
-}
-
static int a5xx_ucode_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -458,8 +504,8 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
int ret;
if (!a5xx_gpu->pm4_bo) {
- a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
- &a5xx_gpu->pm4_iova);
+ a5xx_gpu->pm4_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_PM4], &a5xx_gpu->pm4_iova);
if (IS_ERR(a5xx_gpu->pm4_bo)) {
ret = PTR_ERR(a5xx_gpu->pm4_bo);
@@ -471,8 +517,8 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
}
if (!a5xx_gpu->pfp_bo) {
- a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
- &a5xx_gpu->pfp_iova);
+ a5xx_gpu->pfp_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_PFP], &a5xx_gpu->pfp_iova);
if (IS_ERR(a5xx_gpu->pfp_bo)) {
ret = PTR_ERR(a5xx_gpu->pfp_bo);
@@ -793,19 +839,19 @@ static void a5xx_destroy(struct msm_gpu *gpu)
if (a5xx_gpu->pm4_bo) {
if (a5xx_gpu->pm4_iova)
msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
+ drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
if (a5xx_gpu->pfp_iova)
msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
+ drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+ drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo);
}
adreno_gpu_cleanup(adreno_gpu);
@@ -1195,6 +1241,7 @@ static const struct adreno_gpu_funcs funcs = {
.destroy = a5xx_destroy,
#ifdef CONFIG_DEBUG_FS
.show = a5xx_show,
+ .debugfs_init = a5xx_debugfs_init,
#endif
.gpu_busy = a5xx_gpu_busy,
},
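
a5xx_submit_in_rb() above copies the user's command words straight into the ringbuffer instead of emitting IB pointers, reserving space one dword at a time. The following stand-alone sketch shows that reserve-then-write pattern on a plain circular buffer; it is only an illustration of the idea, not the driver's ring implementation, and ring_consume() stands in for GPU progress.

    /* Illustrative circular-buffer sketch of "wait for one free word, then
     * write it", as the inline-copy path above does with adreno_wait_ring()
     * and OUT_RING(). */
    #include <stdint.h>
    #include <stdio.h>

    #define RING_WORDS 8   /* power of two, so unsigned wraparound is safe */

    struct ring {
        uint32_t buf[RING_WORDS];
        unsigned int head;   /* next write slot */
        unsigned int tail;   /* next slot the consumer will read */
    };

    static unsigned int ring_free(const struct ring *r)
    {
        return RING_WORDS - 1 - ((r->head - r->tail) % RING_WORDS);
    }

    static void ring_consume(struct ring *r, unsigned int n)
    {
        r->tail = (r->tail + n) % RING_WORDS;   /* stand-in for GPU progress */
    }

    static void ring_emit(struct ring *r, const uint32_t *cmds, unsigned int n)
    {
        unsigned int i;

        for (i = 0; i < n; i++) {
            while (ring_free(r) < 1)            /* "wait" for one free word */
                ring_consume(r, 1);
            r->buf[r->head] = cmds[i];
            r->head = (r->head + 1) % RING_WORDS;
        }
    }

    int main(void)
    {
        struct ring r = { .head = 0, .tail = 0 };
        uint32_t cmds[12];
        unsigned int i;

        for (i = 0; i < 12; i++)
            cmds[i] = 0x70000000 | i;
        ring_emit(&r, cmds, 12);
        printf("head=%u tail=%u\n", r.head, r.tail);
        return 0;
    }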
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 6fb8c2f9b9e4..7d71860c4bee 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -49,6 +49,10 @@ struct a5xx_gpu {
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+#ifdef CONFIG_DEBUG_FS
+int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
+#endif
+
/*
* In order to do lockless preemption we use a simple state machine to progress
* through the process.
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
index 4e4d965fd9ab..e9c0e56dbec0 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_power.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -261,7 +261,6 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct drm_device *drm = gpu->dev;
- const struct firmware *fw;
uint32_t dwords = 0, offset = 0, bosize;
unsigned int *data, *ptr, *cmds;
unsigned int cmds_size;
@@ -269,15 +268,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
if (a5xx_gpu->gpmu_bo)
return;
- /* Get the firmware */
- fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->gpmufw);
- if (IS_ERR(fw)) {
- DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
- gpu->name);
- return;
- }
-
- data = (unsigned int *) fw->data;
+ data = (unsigned int *) adreno_gpu->fw[ADRENO_FW_GPMU]->data;
/*
* The first dword is the size of the remaining data in dwords. Use it
@@ -285,12 +276,14 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
* the firmware that we read
*/
- if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
- goto out;
+ if (adreno_gpu->fw[ADRENO_FW_GPMU]->size < 8 ||
+ (data[0] < 2) || (data[0] >=
+ (adreno_gpu->fw[ADRENO_FW_GPMU]->size >> 2)))
+ return;
/* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
if (data[1] != 2)
- goto out;
+ return;
cmds = data + data[2] + 3;
cmds_size = data[0] - data[2] - 2;
@@ -325,8 +318,7 @@ void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
a5xx_gpu->gpmu_dwords = dwords;
- goto out;
-
+ return;
err:
if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
@@ -336,8 +328,4 @@ err:
a5xx_gpu->gpmu_bo = NULL;
a5xx_gpu->gpmu_iova = 0;
a5xx_gpu->gpmu_dwords = 0;
-
-out:
- /* No need to keep that firmware laying around anymore */
- release_firmware(fw);
}
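
The GPMU path above now reads the blob from the shared fw[] table and bails out early if the header looks wrong: word 0 is the remaining length in dwords and word 1 is a firmware ID. A hypothetical stand-alone validator for just those checks might look like this sketch.

    /* Sketch of the header checks performed above before trusting the blob;
     * this is a made-up validator, not driver code. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    #define GPMU_FIRMWARE_ID 2

    static int gpmu_header_ok(const uint32_t *data, size_t fw_size_bytes)
    {
        /* Need at least the two header dwords. */
        if (fw_size_bytes < 8)
            return 0;
        /* Length must cover the ID word and stay inside the blob. */
        if (data[0] < 2 || data[0] >= (fw_size_bytes >> 2))
            return 0;
        return data[1] == GPMU_FIRMWARE_ID;
    }

    int main(void)
    {
        uint32_t good[] = { 3, GPMU_FIRMWARE_ID, 0xdead, 0xbeef, 0xcafe };
        uint32_t bad[]  = { 9, GPMU_FIRMWARE_ID, 0xdead };

        printf("good: %d\n", gpmu_header_ok(good, sizeof(good)));
        printf("bad:  %d\n", gpmu_header_ok(bad, sizeof(bad)));
        return 0;
    }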
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 62bdb7316da1..8e0cb161754b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -30,61 +30,75 @@ static const struct adreno_info gpulist[] = {
.rev = ADRENO_REV(3, 0, 5, ANY_ID),
.revn = 305,
.name = "A305",
- .pm4fw = "a300_pm4.fw",
- .pfpfw = "a300_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
.gmem = SZ_256K,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 0, 6, 0),
.revn = 307, /* because a305c is revn==306 */
.name = "A306",
- .pm4fw = "a300_pm4.fw",
- .pfpfw = "a300_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
.gmem = SZ_128K,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
.revn = 320,
.name = "A320",
- .pm4fw = "a300_pm4.fw",
- .pfpfw = "a300_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
.gmem = SZ_512K,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 3, 0, ANY_ID),
.revn = 330,
.name = "A330",
- .pm4fw = "a330_pm4.fw",
- .pfpfw = "a330_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a330_pm4.fw",
+ [ADRENO_FW_PFP] = "a330_pfp.fw",
+ },
.gmem = SZ_1M,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(4, 2, 0, ANY_ID),
.revn = 420,
.name = "A420",
- .pm4fw = "a420_pm4.fw",
- .pfpfw = "a420_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a420_pm4.fw",
+ [ADRENO_FW_PFP] = "a420_pfp.fw",
+ },
.gmem = (SZ_1M + SZ_512K),
.init = a4xx_gpu_init,
}, {
.rev = ADRENO_REV(4, 3, 0, ANY_ID),
.revn = 430,
.name = "A430",
- .pm4fw = "a420_pm4.fw",
- .pfpfw = "a420_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a420_pm4.fw",
+ [ADRENO_FW_PFP] = "a420_pfp.fw",
+ },
.gmem = (SZ_1M + SZ_512K),
.init = a4xx_gpu_init,
}, {
.rev = ADRENO_REV(5, 3, 0, 2),
.revn = 530,
.name = "A530",
- .pm4fw = "a530_pm4.fw",
- .pfpfw = "a530_pfp.fw",
+ .fw = {
+ [ADRENO_FW_PM4] = "a530_pm4.fw",
+ [ADRENO_FW_PFP] = "a530_pfp.fw",
+ [ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
+ },
.gmem = SZ_1M,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_FAULT_DETECT_MASK,
.init = a5xx_gpu_init,
- .gpmufw = "a530v3_gpmu.fw2",
.zapfw = "a530_zap.mdt",
},
};
@@ -150,6 +164,14 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
return NULL;
}
+#ifdef CONFIG_DEBUG_FS
+ if (gpu->funcs->debugfs_init) {
+ gpu->funcs->debugfs_init(gpu, dev->primary);
+ gpu->funcs->debugfs_init(gpu, dev->render);
+ gpu->funcs->debugfs_init(gpu, dev->control);
+ }
+#endif
+
return gpu;
}
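
The gpulist entries now describe their firmware with a fixed-size array indexed by the ADRENO_FW_* constants, which is what lets adreno_load_fw() walk one generic loop instead of open-coding pm4/pfp/gpmu. A compact sketch of that table-driven pattern is shown below; request_blob() is a hypothetical stand-in for the firmware request.

    /* Sketch of a designated-initializer firmware table plus a generic load
     * loop, mirroring the shape of the change above. */
    #include <stdio.h>

    enum { FW_PM4, FW_PFP, FW_GPMU, FW_MAX };

    struct gpu_info {
        const char *name;
        const char *fw[FW_MAX];   /* sparse: unused slots stay NULL */
    };

    static const struct gpu_info a530 = {
        .name = "A530",
        .fw = {
            [FW_PM4]  = "a530_pm4.fw",
            [FW_PFP]  = "a530_pfp.fw",
            [FW_GPMU] = "a530v3_gpmu.fw2",
        },
    };

    static int request_blob(const char *name)
    {
        printf("requesting %s\n", name);
        return 0;
    }

    static int load_all_fw(const struct gpu_info *info)
    {
        int i, ret;

        for (i = 0; i < FW_MAX; i++) {
            if (!info->fw[i])
                continue;       /* this GPU doesn't use that slot */
            ret = request_blob(info->fw[i]);
            if (ret)
                return ret;     /* caller releases whatever was loaded */
        }
        return 0;
    }

    int main(void)
    {
        return load_all_fw(&a530);
    }

Adding a new firmware type then only means adding an enum value and filling in the matching slot for the GPUs that need it.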
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index de63ff26a062..17d0506d058c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -140,27 +140,47 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
static int adreno_load_fw(struct adreno_gpu *adreno_gpu)
{
- const struct firmware *fw;
+ int i;
- if (adreno_gpu->pm4)
- return 0;
+ for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) {
+ const struct firmware *fw;
+
+ if (!adreno_gpu->info->fw[i])
+ continue;
+
+ /* Skip if the firmware has already been loaded */
+ if (adreno_gpu->fw[i])
+ continue;
- fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pm4fw);
- if (IS_ERR(fw))
- return PTR_ERR(fw);
- adreno_gpu->pm4 = fw;
+ fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]);
+ if (IS_ERR(fw))
+ return PTR_ERR(fw);
- fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->pfpfw);
- if (IS_ERR(fw)) {
- release_firmware(adreno_gpu->pm4);
- adreno_gpu->pm4 = NULL;
- return PTR_ERR(fw);
+ adreno_gpu->fw[i] = fw;
}
- adreno_gpu->pfp = fw;
return 0;
}
+struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova)
+{
+ struct drm_gem_object *bo;
+ void *ptr;
+
+ ptr = msm_gem_kernel_new_locked(gpu->dev, fw->size - 4,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
+
+ if (IS_ERR(ptr))
+ return ERR_CAST(ptr);
+
+ memcpy(ptr, &fw->data[4], fw->size - 4);
+
+ msm_gem_put_vaddr(bo);
+
+ return bo;
+}
+
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -293,26 +313,12 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_RING(ring, 0x00000000);
}
+ /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
- OUT_RING(ring, CACHE_FLUSH_TS);
+ OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->seqno);
- /* we could maybe be clever and only CP_COND_EXEC the interrupt: */
- OUT_PKT3(ring, CP_INTERRUPT, 1);
- OUT_RING(ring, 0x80000000);
-
- /* Workaround for missing irq issue on 8x16/a306. Unsure if the
- * root cause is a platform issue or some a306 quirk, but this
- * keeps things humming along:
- */
- if (adreno_is_a306(adreno_gpu)) {
- OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
- OUT_RING(ring, 0x00000000);
- OUT_PKT3(ring, CP_INTERRUPT, 1);
- OUT_RING(ring, 0x80000000);
- }
-
#if 0
if (adreno_is_a3xx(adreno_gpu)) {
/* Dummy set-constant to trigger context rollover */
@@ -569,8 +575,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
- release_firmware(adreno_gpu->pm4);
- release_firmware(adreno_gpu->pfp);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++)
+ release_firmware(adreno_gpu->fw[i]);
msm_gpu_cleanup(&adreno_gpu->base);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 8d3d0a924908..d6b0e7b813f4 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -48,6 +48,13 @@ enum adreno_regs {
REG_ADRENO_REGISTER_MAX,
};
+enum {
+ ADRENO_FW_PM4 = 0,
+ ADRENO_FW_PFP = 1,
+ ADRENO_FW_GPMU = 2,
+ ADRENO_FW_MAX,
+};
+
enum adreno_quirks {
ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
@@ -72,8 +79,7 @@ struct adreno_info {
struct adreno_rev rev;
uint32_t revn;
const char *name;
- const char *pm4fw, *pfpfw;
- const char *gpmufw;
+ const char *fw[ADRENO_FW_MAX];
uint32_t gmem;
enum adreno_quirks quirks;
struct msm_gpu *(*init)(struct drm_device *dev);
@@ -115,7 +121,7 @@ struct adreno_gpu {
} fwloc;
/* firmware: */
- const struct firmware *pm4, *pfp;
+ const struct firmware *fw[ADRENO_FW_MAX];
/*
* Register offsets are different between some GPUs.
@@ -200,6 +206,8 @@ static inline int adreno_is_a530(struct adreno_gpu *gpu)
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
+struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova);
int adreno_hw_init(struct msm_gpu *gpu);
void adreno_recover(struct msm_gpu *gpu);
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
index 576cea30d391..576cea30d391 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index 14bd3bd3e040..6e5e1aa54ce1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -129,7 +129,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct msm_kms *kms = &mdp4_kms->base.base;
msm_gem_put_iova(val, kms->aspace);
- drm_gem_object_unreference_unlocked(val);
+ drm_gem_object_put_unlocked(val);
}
static void mdp4_crtc_destroy(struct drm_crtc *crtc)
@@ -382,7 +382,7 @@ static void update_cursor(struct drm_crtc *crtc)
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
- drm_gem_object_reference(next_bo);
+ drm_gem_object_get(next_bo);
msm_gem_get_iova(next_bo, kms->aspace, &iova);
/* enable cursor: */
@@ -467,7 +467,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
return 0;
fail:
- drm_gem_object_unreference_unlocked(cursor_bo);
+ drm_gem_object_put_unlocked(cursor_bo);
return ret;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
index 6a1ebdace391..6a1ebdace391 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dsi_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dsi_encoder.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
index ba8e587f734b..ba8e587f734b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
index b764d7f10312..b764d7f10312 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_irq.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index f7f087419ed8..4b646bf9c214 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -164,7 +164,7 @@ static void mdp4_destroy(struct msm_kms *kms)
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
- drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+ drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu,
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
index a1b3e31e959e..0c13f8697bfe 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
@@ -22,7 +22,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
-#include "mdp/mdp_kms.h"
+#include "disp/mdp_kms.h"
#include "mdp4.xml.h"
struct device_node;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
index 4a645926edb7..4a645926edb7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lcdc_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
index e3b1c86b7aae..e3b1c86b7aae 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
index ce4245971673..ce4245971673 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_pll.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_pll.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 7a1ad3af08e3..7a1ad3af08e3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
index d9c10e02ee41..d9c10e02ee41 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 824067d2d427..824067d2d427 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
index 75910d0f2f4c..75910d0f2f4c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index 1abc7f5c345c..d6f79dc755b4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -159,7 +159,7 @@ void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
pingpong_tearcheck_disable(encoder);
mdp5_ctl_set_encoder_state(ctl, pipeline, false);
- mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
bs_set(mdp5_cmd_enc, 0);
@@ -180,7 +180,7 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
if (pingpong_tearcheck_enable(encoder))
return;
- mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
mdp5_ctl_set_encoder_state(ctl, pipeline, true);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index e414850dbbda..9893e43ba6c5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -97,9 +97,13 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_ctl *ctl = mdp5_cstate->ctl;
struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ bool start = !mdp5_cstate->defer_start;
+
+ mdp5_cstate->defer_start = false;
DBG("%s: flush=%08x", crtc->name, flush_mask);
- return mdp5_ctl_commit(ctl, pipeline, flush_mask);
+
+ return mdp5_ctl_commit(ctl, pipeline, flush_mask, start);
}
/*
@@ -170,7 +174,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct msm_kms *kms = &mdp5_kms->base.base;
msm_gem_put_iova(val, kms->aspace);
- drm_gem_object_unreference_unlocked(val);
+ drm_gem_object_put_unlocked(val);
}
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
@@ -947,12 +951,17 @@ mdp5_crtc_atomic_print_state(struct drm_printer *p,
if (WARN_ON(!pipeline))
return;
+ if (mdp5_cstate->ctl)
+ drm_printf(p, "\tctl=%d\n", mdp5_ctl_get_ctl_id(mdp5_cstate->ctl));
+
drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
pipeline->mixer->name : "(null)");
if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
pipeline->r_mixer->name : "(null)");
+
+ drm_printf(p, "\tcmd_mode=%d\n", mdp5_cstate->cmd_mode);
}
static void mdp5_crtc_reset(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index 439e0a300e25..f93d5681267c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -41,7 +41,9 @@ struct mdp5_ctl {
u32 status;
bool encoder_enabled;
- uint32_t start_mask;
+
+ /* pending flush_mask bits */
+ u32 flush_mask;
/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
spinlock_t hw_lock;
@@ -173,16 +175,8 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
- struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
- struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+ struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
struct mdp5_interface *intf = pipeline->intf;
- struct mdp5_hw_mixer *mixer = pipeline->mixer;
- struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
-
- ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) |
- mdp_ctl_flush_mask_encoder(intf);
- if (r_mixer)
- ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
if (!mdp5_cfg_intf_is_virtual(intf->type))
@@ -198,7 +192,7 @@ static bool start_signal_needed(struct mdp5_ctl *ctl,
{
struct mdp5_interface *intf = pipeline->intf;
- if (!ctl->encoder_enabled || ctl->start_mask != 0)
+ if (!ctl->encoder_enabled)
return false;
switch (intf->type) {
@@ -227,25 +221,6 @@ static void send_start_signal(struct mdp5_ctl *ctl)
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
-static void refill_start_mask(struct mdp5_ctl *ctl,
- struct mdp5_pipeline *pipeline)
-{
- struct mdp5_interface *intf = pipeline->intf;
- struct mdp5_hw_mixer *mixer = pipeline->mixer;
- struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
-
- ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm);
- if (r_mixer)
- ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
-
- /*
- * Writeback encoder needs to program & flush
- * address registers for each page flip..
- */
- if (intf->type == INTF_WB)
- ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf);
-}
-
/**
* mdp5_ctl_set_encoder_state() - set the encoder state
*
@@ -268,7 +243,6 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
- refill_start_mask(ctl, pipeline);
}
return 0;
@@ -494,6 +468,8 @@ u32 mdp_ctl_flush_mask_lm(int lm)
case 0: return MDP5_CTL_FLUSH_LM0;
case 1: return MDP5_CTL_FLUSH_LM1;
case 2: return MDP5_CTL_FLUSH_LM2;
+ case 3: return MDP5_CTL_FLUSH_LM3;
+ case 4: return MDP5_CTL_FLUSH_LM4;
case 5: return MDP5_CTL_FLUSH_LM5;
default: return 0;
}
@@ -557,17 +533,14 @@ static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
*/
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
struct mdp5_pipeline *pipeline,
- u32 flush_mask)
+ u32 flush_mask, bool start)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
u32 flush_id = ctl->id;
u32 curr_ctl_flush_mask;
- ctl->start_mask &= ~flush_mask;
-
- VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
- ctl->start_mask, ctl->pending_ctl_trigger);
+ VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);
if (ctl->pending_ctl_trigger & flush_mask) {
flush_mask |= MDP5_CTL_FLUSH_CTL;
@@ -582,6 +555,14 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
fix_for_single_flush(ctl, &flush_mask, &flush_id);
+ if (!start) {
+ ctl->flush_mask |= flush_mask;
+ return curr_ctl_flush_mask;
+ } else {
+ flush_mask |= ctl->flush_mask;
+ ctl->flush_mask = 0;
+ }
+
if (flush_mask) {
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
@@ -590,7 +571,6 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
- refill_start_mask(ctl, pipeline);
}
return curr_ctl_flush_mask;
@@ -711,6 +691,7 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
struct mdp5_ctl_manager *ctl_mgr;
const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
+ unsigned dsi_cnt = 0;
const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
unsigned long flags;
int c, ret;
@@ -760,7 +741,10 @@ struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
* only write into CTL0's FLUSH register) to keep two DSI pipes in sync.
* Single FLUSH is supported from hw rev v3.0.
*/
- if (rev >= 3) {
+ for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++)
+ if (hw_cfg->intf.connect[c] == INTF_DSI)
+ dsi_cnt++;
+ if ((rev >= 3) && (dsi_cnt > 1)) {
ctl_mgr->single_flush_supported = true;
/* Reserve CTL0/1 for INTF1/2 */
ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
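
The reworked mdp5_ctl_commit() above takes a start flag: when it is false the flush bits are only accumulated in ctl->flush_mask, and the next start=true commit ORs the accumulated bits in before touching the FLUSH register. A minimal sketch of that accumulate-or-apply behaviour follows; hw_flush() is a stub for the register write and the bit values are arbitrary.

    /* Sketch of deferred-flush accumulation, in the shape of the commit
     * path above; not the driver's code. */
    #include <stdint.h>
    #include <stdio.h>

    struct ctl {
        uint32_t pending;   /* flush bits deferred by start == false */
    };

    static void hw_flush(uint32_t mask)
    {
        printf("FLUSH <- 0x%08x\n", mask);
    }

    static void ctl_commit(struct ctl *ctl, uint32_t flush_mask, int start)
    {
        if (!start) {
            /* encoder not enabled yet: remember the bits, don't kick hw */
            ctl->pending |= flush_mask;
            return;
        }
        flush_mask |= ctl->pending;
        ctl->pending = 0;
        if (flush_mask)
            hw_flush(flush_mask);
    }

    int main(void)
    {
        struct ctl ctl = { .pending = 0 };

        ctl_commit(&ctl, 0x00000040, 0);   /* plane flush, deferred */
        ctl_commit(&ctl, 0x00000001, 0);   /* mixer flush, deferred */
        ctl_commit(&ctl, 0x00020000, 1);   /* encoder flush kicks everything */
        return 0;
    }

This matches the defer_start logic added on the CRTC/encoder side: intermediate flushes pile up, and the single start=true commit from encoder enable writes them all at once.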
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
index b63120388dc6..403b0db0fa4c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
@@ -78,7 +78,7 @@ u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
/* @flush_mask: see CTL flush masks definitions below */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
- u32 flush_mask);
+ u32 flush_mask, bool start);
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
index 36ad3cbe5f79..9af94e35f678 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
@@ -228,7 +228,7 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
/*
* Wait for a vsync so we know the ENABLE=0 latched before
@@ -262,7 +262,7 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
mdp5_ctl_set_encoder_state(ctl, pipeline, true);
@@ -319,6 +319,7 @@ static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
mdp5_cstate->ctl = ctl;
mdp5_cstate->pipeline.intf = intf;
+ mdp5_cstate->defer_start = true;
return 0;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
index 280e368bc9bb..280e368bc9bb 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_irq.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 3e9bba4d6624..6d8e3a9a6fc0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -680,7 +680,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
} else {
dev_info(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
- aspace = NULL;;
+ aspace = NULL;
}
pm_runtime_put_sync(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
index 9b3fe01089d1..425a03d213e5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h
@@ -20,7 +20,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
-#include "mdp/mdp_kms.h"
+#include "disp/mdp_kms.h"
#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
#include "mdp5.xml.h"
#include "mdp5_pipe.h"
@@ -133,6 +133,14 @@ struct mdp5_crtc_state {
u32 pp_done_irqmask;
bool cmd_mode;
+
+ /* should we not write CTL[n].START register on flush? If the
+ * encoder has changed this is set to true, since encoder->enable()
+ * is called after crtc state is committed, but we only want to
+ * write the CTL[n].START register once. This lets us defer
+ * writing CTL[n].START until encoder->enable()
+ */
+ bool defer_start;
};
#define to_mdp5_crtc_state(x) \
container_of(x, struct mdp5_crtc_state, base)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index f2a0db7a8a03..f2a0db7a8a03 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
index 8a00991f03c7..8a00991f03c7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
index 9be94f567fbd..9be94f567fbd 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
index ff52c49095f9..ff52c49095f9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
index bb2b0ac7aa2b..bb2b0ac7aa2b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_pipe.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
index 44fc9fe4737a..a9f31da7d45a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_plane.c
@@ -535,7 +535,7 @@ static void mdp5_plane_atomic_async_update(struct drm_plane *plane,
ctl = mdp5_crtc_get_ctl(new_state->crtc);
- mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
+ mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane), true);
}
*to_mdp5_plane_state(plane->state) =
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index ae4983d9d0a5..ae4983d9d0a5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
index b41d0448fbe8..b41d0448fbe8 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
index 1494c407be44..1494c407be44 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/disp/mdp_format.c
index b4a8aa4490ee..b4a8aa4490ee 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_format.c
+++ b/drivers/gpu/drm/msm/disp/mdp_format.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/disp/mdp_kms.c
index 64287304054d..64287304054d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.c
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/disp/mdp_kms.h
index 1185487e7e5e..1185487e7e5e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/disp/mdp_kms.h
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 98742d7af6dc..b744bcc7d8ad 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -192,13 +192,14 @@ void __exit msm_dsi_unregister(void)
int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_encoder *encoder)
{
- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_private *priv;
struct drm_bridge *ext_bridge;
int ret;
- if (WARN_ON(!encoder))
+ if (WARN_ON(!encoder) || WARN_ON(!msm_dsi) || WARN_ON(!dev))
return -EINVAL;
+ priv = dev->dev_private;
msm_dsi->dev = dev;
ret = msm_dsi_host_modeset_init(msm_dsi->host, dev);
@@ -245,19 +246,17 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
return 0;
fail:
- if (msm_dsi) {
- /* bridge/connector are normally destroyed by drm: */
- if (msm_dsi->bridge) {
- msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
- msm_dsi->bridge = NULL;
- }
+ /* bridge/connector are normally destroyed by drm: */
+ if (msm_dsi->bridge) {
+ msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
+ msm_dsi->bridge = NULL;
+ }
- /* don't destroy connector if we didn't make it */
- if (msm_dsi->connector && !msm_dsi->external_bridge)
- msm_dsi->connector->funcs->destroy(msm_dsi->connector);
+ /* don't destroy connector if we didn't make it */
+ if (msm_dsi->connector && !msm_dsi->external_bridge)
+ msm_dsi->connector->funcs->destroy(msm_dsi->connector);
- msm_dsi->connector = NULL;
- }
+ msm_dsi->connector = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 2302046197a8..70d9a9a47acd 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -36,6 +36,7 @@ enum msm_dsi_phy_type {
MSM_DSI_PHY_20NM,
MSM_DSI_PHY_28NM_8960,
MSM_DSI_PHY_14NM,
+ MSM_DSI_PHY_10NM,
MSM_DSI_PHY_MAX
};
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index 479086ccf180..f6a9471b70c8 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,19 +8,10 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-01-12 09:09:22)
+- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -1556,5 +1547,175 @@ static inline uint32_t REG_DSI_14nm_PHY_LN_VREG_CNTRL(uint32_t i0) { return 0x00
#define REG_DSI_14nm_PHY_PLL_PLL_BANDGAP 0x00000108
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID0 0x00000000
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID1 0x00000004
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID2 0x00000008
+
+#define REG_DSI_10nm_PHY_CMN_REVISION_ID3 0x0000000c
+
+#define REG_DSI_10nm_PHY_CMN_CLK_CFG0 0x00000010
+
+#define REG_DSI_10nm_PHY_CMN_CLK_CFG1 0x00000014
+
+#define REG_DSI_10nm_PHY_CMN_GLBL_CTRL 0x00000018
+
+#define REG_DSI_10nm_PHY_CMN_RBUF_CTRL 0x0000001c
+
+#define REG_DSI_10nm_PHY_CMN_VREG_CTRL 0x00000020
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_0 0x00000024
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_1 0x00000028
+
+#define REG_DSI_10nm_PHY_CMN_CTRL_2 0x0000002c
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CFG0 0x00000030
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CFG1 0x00000034
+
+#define REG_DSI_10nm_PHY_CMN_PLL_CNTRL 0x00000038
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL0 0x00000098
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL1 0x0000009c
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL2 0x000000a0
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL3 0x000000a4
+
+#define REG_DSI_10nm_PHY_CMN_LANE_CTRL4 0x000000a8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0 0x000000ac
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1 0x000000b0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2 0x000000b4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3 0x000000b8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4 0x000000bc
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5 0x000000c0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6 0x000000c4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7 0x000000c8
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8 0x000000cc
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9 0x000000d0
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10 0x000000d4
+
+#define REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11 0x000000d8
+
+#define REG_DSI_10nm_PHY_CMN_PHY_STATUS 0x000000ec
+
+#define REG_DSI_10nm_PHY_CMN_LANE_STATUS0 0x000000f4
+
+#define REG_DSI_10nm_PHY_CMN_LANE_STATUS1 0x000000f8
+
+static inline uint32_t REG_DSI_10nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_CFG3(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(uint32_t i0) { return 0x0000001c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(uint32_t i0) { return 0x00000020 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(uint32_t i0) { return 0x00000024 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000028 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000002c + 0x80*i0; }
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE 0x00000000
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO 0x00000004
+
+#define REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE 0x00000010
+
+#define REG_DSI_10nm_PHY_PLL_DSM_DIVIDER 0x0000001c
+
+#define REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER 0x00000020
+
+#define REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES 0x00000024
+
+#define REG_DSI_10nm_PHY_PLL_CMODE 0x0000002c
+
+#define REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS 0x00000030
+
+#define REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE 0x00000054
+
+#define REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE 0x00000064
+
+#define REG_DSI_10nm_PHY_PLL_PFILT 0x0000007c
+
+#define REG_DSI_10nm_PHY_PLL_IFILT 0x00000080
+
+#define REG_DSI_10nm_PHY_PLL_OUTDIV 0x00000094
+
+#define REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE 0x000000a4
+
+#define REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE 0x000000a8
+
+#define REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO 0x000000b4
+
+#define REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1 0x000000cc
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1 0x000000d0
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1 0x000000d4
+
+#define REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1 0x000000d8
+
+#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1 0x0000010c
+
+#define REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1 0x00000110
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1 0x00000114
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1 0x00000118
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1 0x0000011c
+
+#define REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1 0x00000120
+
+#define REG_DSI_10nm_PHY_PLL_SSC_CONTROL 0x0000013c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE 0x00000140
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1 0x00000144
+
+#define REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1 0x0000014c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1 0x00000154
+
+#define REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1 0x0000015c
+
+#define REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1 0x00000164
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE 0x00000180
+
+#define REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY 0x00000184
+
+#define REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS 0x0000018c
+
+#define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE 0x000001a0
+
#endif /* DSI_XML */
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 65c1dfbbe019..0327bb54b01b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -118,6 +118,24 @@ static const struct msm_dsi_config msm8996_dsi_cfg = {
.num_dsi = 2,
};
+static const char * const dsi_sdm845_bus_clk_names[] = {
+ "iface", "bus",
+};
+
+static const struct msm_dsi_config sdm845_dsi_cfg = {
+ .io_offset = DSI_6G_REG_SHIFT,
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdda", 21800, 4 }, /* 1.2 V */
+ },
+ },
+ .bus_clk_names = dsi_sdm845_bus_clk_names,
+ .num_bus_clks = ARRAY_SIZE(dsi_sdm845_bus_clk_names),
+ .io_start = { 0xae94000, 0xae96000 },
+ .num_dsi = 2,
+};
+
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
{MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
@@ -131,6 +149,7 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, &sdm845_dsi_cfg},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
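
The new sdm845 entry is selected the same way as the existing ones: msm_dsi_cfg_get() walks dsi_cfg_handlers[] looking for a (major, minor) match. The sketch below shows that lookup shape with made-up version numbers and config names; it is illustrative only.

    /* Sketch of a (major, minor) -> config lookup table; all names and
     * version values here are invented for the example. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stddef.h>

    struct demo_cfg {
        const char *name;
    };

    struct cfg_handler {
        uint32_t major;
        uint32_t minor;
        const struct demo_cfg *cfg;
    };

    static const struct demo_cfg cfg_a = { "first SoC" };
    static const struct demo_cfg cfg_b = { "second SoC" };

    static const struct cfg_handler handlers[] = {
        { 1, 0x10040001, &cfg_a },
        { 1, 0x20020001, &cfg_b },
    };

    static const struct demo_cfg *cfg_get(uint32_t major, uint32_t minor)
    {
        size_t i;

        for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
            if (handlers[i].major == major && handlers[i].minor == minor)
                return handlers[i].cfg;
        return NULL;    /* unknown controller revision */
    }

    int main(void)
    {
        const struct demo_cfg *cfg = cfg_get(1, 0x20020001);

        printf("%s\n", cfg ? cfg->name : "not found");
        return 0;
    }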
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 00a5da2663c6..9cfdcf1c95d5 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -25,6 +25,7 @@
#define MSM_DSI_6G_VER_MINOR_V1_3 0x10030000
#define MSM_DSI_6G_VER_MINOR_V1_3_1 0x10030001
#define MSM_DSI_6G_VER_MINOR_V1_4_1 0x10040001
+#define MSM_DSI_6G_VER_MINOR_V2_2_1 0x20020001
#define MSM_DSI_V2_VER_MINOR_8064 0x0
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 0f7324a686ca..7a03a9489708 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -115,6 +115,7 @@ struct msm_dsi_host {
struct clk *pixel_clk;
struct clk *byte_clk_src;
struct clk *pixel_clk_src;
+ struct clk *byte_intf_clk;
u32 byte_clk_rate;
u32 esc_clk_rate;
@@ -214,7 +215,7 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
goto exit;
}
- ahb_clk = clk_get(dev, "iface_clk");
+ ahb_clk = msm_clk_get(msm_host->pdev, "iface");
if (IS_ERR(ahb_clk)) {
pr_err("%s: cannot get interface clock\n", __func__);
goto put_gdsc;
@@ -225,7 +226,7 @@ static const struct msm_dsi_cfg_handler *dsi_get_config(
ret = regulator_enable(gdsc_reg);
if (ret) {
pr_err("%s: unable to enable gdsc\n", __func__);
- goto put_clk;
+ goto put_gdsc;
}
ret = clk_prepare_enable(ahb_clk);
@@ -249,8 +250,6 @@ disable_clks:
disable_gdsc:
regulator_disable(gdsc_reg);
pm_runtime_put_sync(dev);
-put_clk:
- clk_put(ahb_clk);
put_gdsc:
regulator_put(gdsc_reg);
exit:
@@ -379,6 +378,19 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
goto exit;
}
+ if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G &&
+ cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V2_2_1) {
+ msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
+ if (IS_ERR(msm_host->byte_intf_clk)) {
+ ret = PTR_ERR(msm_host->byte_intf_clk);
+ pr_err("%s: can't find byte_intf clock. ret=%d\n",
+ __func__, ret);
+ goto exit;
+ }
+ } else {
+ msm_host->byte_intf_clk = NULL;
+ }
+
msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
if (!msm_host->byte_clk_src) {
ret = -ENODEV;
@@ -504,6 +516,16 @@ static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
goto error;
}
+ if (msm_host->byte_intf_clk) {
+ ret = clk_set_rate(msm_host->byte_intf_clk,
+ msm_host->byte_clk_rate / 2);
+ if (ret) {
+ pr_err("%s: Failed to set rate byte intf clk, %d\n",
+ __func__, ret);
+ goto error;
+ }
+ }
+
ret = clk_prepare_enable(msm_host->esc_clk);
if (ret) {
pr_err("%s: Failed to enable dsi esc clk\n", __func__);
@@ -522,8 +544,19 @@ static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
goto pixel_clk_err;
}
+ if (msm_host->byte_intf_clk) {
+ ret = clk_prepare_enable(msm_host->byte_intf_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable byte intf clk\n",
+ __func__);
+ goto byte_intf_clk_err;
+ }
+ }
+
return 0;
+byte_intf_clk_err:
+ clk_disable_unprepare(msm_host->pixel_clk);
pixel_clk_err:
clk_disable_unprepare(msm_host->byte_clk);
byte_clk_err:
@@ -617,6 +650,8 @@ static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
clk_disable_unprepare(msm_host->esc_clk);
clk_disable_unprepare(msm_host->pixel_clk);
+ if (msm_host->byte_intf_clk)
+ clk_disable_unprepare(msm_host->byte_intf_clk);
clk_disable_unprepare(msm_host->byte_clk);
} else {
clk_disable_unprepare(msm_host->pixel_clk);
@@ -1028,10 +1063,8 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
if (msm_host->tx_gem_obj) {
msm_gem_put_iova(msm_host->tx_gem_obj, 0);
- mutex_lock(&dev->struct_mutex);
- msm_gem_free_object(msm_host->tx_gem_obj);
+ drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
msm_host->tx_gem_obj = NULL;
- mutex_unlock(&dev->struct_mutex);
}
if (msm_host->tx_buf)
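
The byte_intf clock handling above follows the usual enable-in-order, unwind-in-reverse idiom: the optional clock is only fetched and enabled on hosts new enough to have it, and a failure releases whatever was already enabled via the goto chain. Here is a stand-alone sketch of that idiom with stubbed enable()/disable() calls; it deliberately forces one failure so the unwind path runs.

    /* Sketch of goto-based error unwinding for an optional resource; the
     * clock names and stubs are invented for the example. */
    #include <stdio.h>

    static int enable(const char *clk, int fail)
    {
        if (fail) {
            printf("enable %s: failed\n", clk);
            return -1;
        }
        printf("enable %s\n", clk);
        return 0;
    }

    static void disable(const char *clk)
    {
        printf("disable %s\n", clk);
    }

    static int link_clk_enable(int have_byte_intf)
    {
        int ret;

        ret = enable("byte", 0);
        if (ret)
            return ret;

        ret = enable("pixel", 0);
        if (ret)
            goto byte_err;

        if (have_byte_intf) {               /* optional, newer hosts only */
            ret = enable("byte_intf", 1);   /* force a failure for the demo */
            if (ret)
                goto pixel_err;
        }
        return 0;

    pixel_err:
        disable("pixel");
    byte_err:
        disable("byte");
        return ret;
    }

    int main(void)
    {
        return link_clk_enable(1) ? 1 : 0;
    }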
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 855248132b2b..4cb1cb68878b 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -88,6 +88,8 @@ static int dsi_mgr_setup_components(int id)
msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+ if (IS_ERR(src_pll))
+ return PTR_ERR(src_pll);
ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
} else if (!other_dsi) {
ret = 0;
@@ -116,6 +118,8 @@ static int dsi_mgr_setup_components(int id)
msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
MSM_DSI_PHY_SLAVE);
src_pll = msm_dsi_phy_get_pll(clk_master_dsi->phy);
+ if (IS_ERR(src_pll))
+ return PTR_ERR(src_pll);
ret = msm_dsi_host_set_src_pll(msm_dsi->host, src_pll);
if (ret)
return ret;
@@ -858,7 +862,7 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
int id = msm_dsi->id;
int ret;
- if (id > DSI_MAX) {
+ if (id >= DSI_MAX) {
pr_err("%s: invalid id %d\n", __func__, id);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 790ca280cbfd..8e9d5c255820 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -395,6 +395,10 @@ static const struct of_device_id dsi_phy_dt_match[] = {
{ .compatible = "qcom,dsi-phy-14nm",
.data = &dsi_phy_14nm_cfgs },
#endif
+#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
+ { .compatible = "qcom,dsi-phy-10nm",
+ .data = &dsi_phy_10nm_cfgs },
+#endif
{}
};
@@ -503,10 +507,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
goto fail;
phy->pll = msm_dsi_pll_init(pdev, phy->cfg->type, phy->id);
- if (!phy->pll)
+ if (IS_ERR_OR_NULL(phy->pll))
dev_info(dev,
- "%s: pll init failed, need separate pll clk driver\n",
- __func__);
+ "%s: pll init failed: %ld, need separate pll clk driver\n",
+ __func__, PTR_ERR(phy->pll));
dsi_phy_disable_resource(phy);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 1733f6608a09..c56268cbdb3d 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -48,6 +48,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
struct msm_dsi_dphy_timing {
u32 clk_pre;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
new file mode 100644
index 000000000000..0af951aaeea1
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
@@ -0,0 +1,251 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/iopoll.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static int dsi_phy_hw_v3_0_is_pll_on(struct msm_dsi_phy *phy)
+{
+ void __iomem *base = phy->base;
+ u32 data = 0;
+
+ data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL);
+ mb(); /* make sure read happened */
+
+ return (data & BIT(0));
+}
+
+static void dsi_phy_hw_v3_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
+{
+ void __iomem *lane_base = phy->lane_base;
+ int phy_lane_0 = 0; /* TODO: Support all lane swap configs */
+
+ /*
+ * LPRX and CDRX need to be enabled only for the physical data lane
+ * corresponding to logical data lane 0
+ */
+ if (enable)
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
+ else
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
+}
+
+static void dsi_phy_hw_v3_0_lane_settings(struct msm_dsi_phy *phy)
+{
+ int i;
+ u8 tx_dctrl[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
+ void __iomem *lane_base = phy->lane_base;
+
+ /* Strength ctrl settings */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPTX_STR_CTRL(i),
+ 0x55);
+ /*
+ * Disable LPRX and CDRX for all lanes. And later on, it will
+ * be only enabled for the physical data lane corresponding
+ * to the logical data lane 0
+ */
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_LPRX_CTRL(i), 0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_PIN_SWAP(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_HSTX_STR_CTRL(i),
+ 0x88);
+ }
+
+ dsi_phy_hw_v3_0_config_lpcdrx(phy, true);
+
+ /* other settings */
+ for (i = 0; i < 5; i++) {
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG0(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG1(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG2(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_CFG3(i),
+ i == 4 ? 0x80 : 0x0);
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_OFFSET_TOP_CTRL(i), 0x0);
+ dsi_phy_write(lane_base +
+ REG_DSI_10nm_PHY_LN_OFFSET_BOT_CTRL(i), 0x0);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(i),
+ tx_dctrl[i]);
+ }
+
+ /* Toggle BIT 0 to release freeze I/O */
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x05);
+ dsi_phy_write(lane_base + REG_DSI_10nm_PHY_LN_TX_DCTRL(3), 0x04);
+}
+
+static int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ /*
+ * TODO: These params need to be computed, they're currently hardcoded
+ * for a 1440x2560@60Hz panel with a byteclk of 100.618 Mhz, and a
+ * default escape clock of 19.2 Mhz.
+ */
+
+ timing->hs_halfbyte_en = 0;
+ timing->clk_zero = 0x1c;
+ timing->clk_prepare = 0x07;
+ timing->clk_trail = 0x07;
+ timing->hs_exit = 0x23;
+ timing->hs_zero = 0x21;
+ timing->hs_prepare = 0x07;
+ timing->hs_trail = 0x07;
+ timing->hs_rqst = 0x05;
+ timing->ta_sure = 0x00;
+ timing->ta_go = 0x03;
+ timing->ta_get = 0x04;
+
+ timing->shared_timings.clk_pre = 0x2d;
+ timing->shared_timings.clk_post = 0x0d;
+
+ return 0;
+}
+
+static int dsi_10nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+ struct msm_dsi_phy_clk_request *clk_req)
+{
+ int ret;
+ u32 status;
+ u32 const delay_us = 5;
+ u32 const timeout_us = 1000;
+ struct msm_dsi_dphy_timing *timing = &phy->timing;
+ void __iomem *base = phy->base;
+ u32 data;
+
+ DBG("");
+
+ if (msm_dsi_dphy_timing_calc_v3(timing, clk_req)) {
+ dev_err(&phy->pdev->dev,
+ "%s: D-PHY timing calculation failed\n", __func__);
+ return -EINVAL;
+ }
+
+ if (dsi_phy_hw_v3_0_is_pll_on(phy))
+ pr_warn("PLL turned on before configuring PHY\n");
+
+ /* wait for REFGEN READY */
+ ret = readl_poll_timeout_atomic(base + REG_DSI_10nm_PHY_CMN_PHY_STATUS,
+ status, (status & BIT(0)),
+ delay_us, timeout_us);
+ if (ret) {
+ pr_err("Ref gen not ready. Aborting\n");
+ return -EINVAL;
+ }
+
+ /* de-assert digital and pll power down */
+ data = BIT(6) | BIT(5);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
+
+ /* Assert PLL core reset */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+ /* turn off resync FIFO */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x00);
+
+ /* Select MS1 byte-clk */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_GLBL_CTRL, 0x10);
+
+ /* Enable LDO */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_VREG_CTRL, 0x59);
+
+ /* Configure PHY lane swap (TODO: we need to calculate this) */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG0, 0x21);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CFG1, 0x84);
+
+ /* DSI PHY timings */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_0,
+ timing->hs_halfbyte_en);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_1,
+ timing->clk_zero);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_2,
+ timing->clk_prepare);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_3,
+ timing->clk_trail);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_4,
+ timing->hs_exit);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_5,
+ timing->hs_zero);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_6,
+ timing->hs_prepare);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_7,
+ timing->hs_trail);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_8,
+ timing->hs_rqst);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_9,
+ timing->ta_go | (timing->ta_sure << 3));
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_10,
+ timing->ta_get);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_TIMING_CTRL_11,
+ 0x00);
+
+ /* Remove power down from all blocks */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, 0x7f);
+
+ /* power up lanes */
+ data = dsi_phy_read(base + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ /* TODO: only power up lanes that are used */
+ data |= 0x1F;
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_0, data);
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_LANE_CTRL0, 0x1F);
+
+ /* Select full-rate mode */
+ dsi_phy_write(base + REG_DSI_10nm_PHY_CMN_CTRL_2, 0x40);
+
+ ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
+ if (ret) {
+ dev_err(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ /* DSI lane settings */
+ dsi_phy_hw_v3_0_lane_settings(phy);
+
+ DBG("DSI%d PHY enabled", phy->id);
+
+ return 0;
+}
+
+static void dsi_10nm_phy_disable(struct msm_dsi_phy *phy)
+{
+}
+
+static int dsi_10nm_phy_init(struct msm_dsi_phy *phy)
+{
+ struct platform_device *pdev = phy->pdev;
+
+ phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
+ "DSI_PHY_LANE");
+ if (IS_ERR(phy->lane_base)) {
+ dev_err(&pdev->dev, "%s: failed to map phy lane base\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs = {
+ .type = MSM_DSI_PHY_10NM,
+ .src_pll_truthtable = { {false, false}, {true, false} },
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vdds", 36000, 32},
+ },
+ },
+ .ops = {
+ .enable = dsi_10nm_phy_enable,
+ .disable = dsi_10nm_phy_disable,
+ .init = dsi_10nm_phy_init,
+ },
+ .io_start = { 0xae94400, 0xae96400 },
+ .num_dsi_phy = 2,
+};
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
index bc289f5c9078..613e206fa4fc 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
@@ -166,6 +166,9 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
case MSM_DSI_PHY_14NM:
pll = msm_dsi_pll_14nm_init(pdev, id);
break;
+ case MSM_DSI_PHY_10NM:
+ pll = msm_dsi_pll_10nm_init(pdev, id);
+ break;
default:
pll = ERR_PTR(-ENXIO);
break;
@@ -173,7 +176,7 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
if (IS_ERR(pll)) {
dev_err(dev, "%s: failed to init DSI PLL\n", __func__);
- return NULL;
+ return pll;
}
pll->type = type;
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
index f63e7ada74a8..8b32271cbc24 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
@@ -115,5 +115,14 @@ msm_dsi_pll_14nm_init(struct platform_device *pdev, int id)
return ERR_PTR(-ENODEV);
}
#endif
+#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
+struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id);
+#else
+static inline struct msm_dsi_pll *
+msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
+{
+ return ERR_PTR(-ENODEV);
+}
+#endif
#endif /* __DSI_PLL_H__ */
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
new file mode 100644
index 000000000000..c4c37a7df637
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -0,0 +1,822 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
+
+#include "dsi_pll.h"
+#include "dsi.xml.h"
+
+/*
+ * DSI PLL 10nm - clock diagram (eg: DSI0):
+ *
+ * dsi0_pll_out_div_clk dsi0_pll_bit_clk
+ * | |
+ * | |
+ * +---------+ | +----------+ | +----+
+ * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte
+ * +---------+ | +----------+ | +----+
+ * | |
+ * | | dsi0_pll_by_2_bit_clk
+ * | | |
+ * | | +----+ | |\ dsi0_pclk_mux
+ * | |--| /2 |--o--| \ |
+ * | | +----+ | \ | +---------+
+ * | --------------| |--o--| div_7_4 |-- dsi0pll
+ * |------------------------------| / +---------+
+ * | +-----+ | /
+ * -----------| /4? |--o----------|/
+ * +-----+ | |
+ * | |dsiclk_sel
+ * |
+ * dsi0_pll_post_out_div_clk
+ */
+
+#define DSI_BYTE_PLL_CLK 0
+#define DSI_PIXEL_PLL_CLK 1
+#define NUM_PROVIDED_CLKS 2
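+
+/*
+ * dsi0pllbyte and dsi0pll in the diagram above are the two clocks handed out
+ * to the clock framework, as DSI_BYTE_PLL_CLK and DSI_PIXEL_PLL_CLK
+ * respectively (see pll_10nm_register() and dsi_pll_10nm_get_provider()).
+ */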
+
+struct dsi_pll_regs {
+ u32 pll_prop_gain_rate;
+ u32 pll_lockdet_rate;
+ u32 decimal_div_start;
+ u32 frac_div_start_low;
+ u32 frac_div_start_mid;
+ u32 frac_div_start_high;
+ u32 pll_clock_inverters;
+ u32 ssc_stepsize_low;
+ u32 ssc_stepsize_high;
+ u32 ssc_div_per_low;
+ u32 ssc_div_per_high;
+ u32 ssc_adjper_low;
+ u32 ssc_adjper_high;
+ u32 ssc_control;
+};
+
+struct dsi_pll_config {
+ u32 ref_freq;
+ bool div_override;
+ u32 output_div;
+ bool ignore_frac;
+ bool disable_prescaler;
+ bool enable_ssc;
+ bool ssc_center;
+ u32 dec_bits;
+ u32 frac_bits;
+ u32 lock_timer;
+ u32 ssc_freq;
+ u32 ssc_offset;
+ u32 ssc_adj_per;
+ u32 thresh_cycles;
+ u32 refclk_cycles;
+};
+
+struct pll_10nm_cached_state {
+ unsigned long vco_rate;
+ u8 bit_clk_div;
+ u8 pix_clk_div;
+ u8 pll_out_div;
+ u8 pll_mux;
+};
+
+struct dsi_pll_10nm {
+ struct msm_dsi_pll base;
+
+ int id;
+ struct platform_device *pdev;
+
+ void __iomem *phy_cmn_mmio;
+ void __iomem *mmio;
+
+ u64 vco_ref_clk_rate;
+ u64 vco_current_rate;
+
+ /* protects REG_DSI_10nm_PHY_CMN_CLK_CFG0 register */
+ spinlock_t postdiv_lock;
+
+ int vco_delay;
+ struct dsi_pll_config pll_configuration;
+ struct dsi_pll_regs reg_setup;
+
+ /* private clocks: */
+ struct clk_hw *hws[NUM_DSI_CLOCKS_MAX];
+ u32 num_hws;
+
+ /* clock-provider: */
+ struct clk_hw_onecell_data *hw_data;
+
+ struct pll_10nm_cached_state cached_state;
+
+ enum msm_dsi_phy_usecase uc;
+ struct dsi_pll_10nm *slave;
+};
+
+#define to_pll_10nm(x) container_of(x, struct dsi_pll_10nm, base)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private
+ * data.
+ */
+static struct dsi_pll_10nm *pll_10nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_10nm *pll)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+
+ config->ref_freq = pll->vco_ref_clk_rate;
+ config->output_div = 1;
+ config->dec_bits = 8;
+ config->frac_bits = 18;
+ config->lock_timer = 64;
+ config->ssc_freq = 31500;
+ config->ssc_offset = 5000;
+ config->ssc_adj_per = 2;
+ config->thresh_cycles = 32;
+ config->refclk_cycles = 256;
+
+ config->div_override = false;
+ config->ignore_frac = false;
+ config->disable_prescaler = false;
+
+ config->enable_ssc = false;
+ config->ssc_center = false;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+ u64 fref = pll->vco_ref_clk_rate;
+ u64 pll_freq;
+ u64 divider;
+ u64 dec, dec_multiple;
+ u32 frac;
+ u64 multiplier;
+
+ pll_freq = pll->vco_current_rate;
+
+ if (config->disable_prescaler)
+ divider = fref;
+ else
+ divider = fref * 2;
+
+ multiplier = 1 << config->frac_bits;
+ dec_multiple = div_u64(pll_freq * multiplier, divider);
+ div_u64_rem(dec_multiple, multiplier, &frac);
+
+ dec = div_u64(dec_multiple, multiplier);
+
+ if (pll_freq <= 1900000000UL)
+ regs->pll_prop_gain_rate = 8;
+ else if (pll_freq <= 3000000000UL)
+ regs->pll_prop_gain_rate = 10;
+ else
+ regs->pll_prop_gain_rate = 12;
+ if (pll_freq < 1100000000UL)
+ regs->pll_clock_inverters = 8;
+ else
+ regs->pll_clock_inverters = 0;
+
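+ /*
+ * Worked example (illustrative values): with the default 19.2 MHz
+ * reference and the prescaler enabled (divider = 38.4 MHz), a 1.5 GHz
+ * VCO rate gives dec_multiple = 1500000000 * 2^18 / 38400000 =
+ * 10240000, i.e. dec = 39 (0x27) and frac = 16384 (0x4000).
+ */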
+ regs->pll_lockdet_rate = config->lock_timer;
+ regs->decimal_div_start = dec;
+ regs->frac_div_start_low = (frac & 0xff);
+ regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+ regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
+
+#define SSC_CENTER BIT(0)
+#define SSC_EN BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll)
+{
+ struct dsi_pll_config *config = &pll->pll_configuration;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+ u32 ssc_per;
+ u32 ssc_mod;
+ u64 ssc_step_size;
+ u64 frac;
+
+ if (!config->enable_ssc) {
+ DBG("SSC not enabled\n");
+ return;
+ }
+
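+ /*
+ * The step size below is the per-step deviation in PLL codeword units:
+ * the nominal code (dec * 2^frac_bits + frac) scaled by ssc_offset (in
+ * ppm, hence the final division by 10^6) and divided by the number of
+ * steps, (ssc_per + 1) / (ssc_adj_per + 1).
+ */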
+ ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
+ ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+ ssc_per -= ssc_mod;
+
+ frac = regs->frac_div_start_low |
+ (regs->frac_div_start_mid << 8) |
+ (regs->frac_div_start_high << 16);
+ ssc_step_size = regs->decimal_div_start;
+ ssc_step_size *= (1 << config->frac_bits);
+ ssc_step_size += frac;
+ ssc_step_size *= config->ssc_offset;
+ ssc_step_size *= (config->ssc_adj_per + 1);
+ ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+ ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+ regs->ssc_div_per_low = ssc_per & 0xFF;
+ regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
+ regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
+ regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
+ regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
+ regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
+
+ regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
+
+ pr_debug("SCC: Dec:%d, frac:%llu, frac_bits:%d\n",
+ regs->decimal_div_start, frac, config->frac_bits);
+ pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+ ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
+
+static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll)
+{
+ void __iomem *base = pll->mmio;
+ struct dsi_pll_regs *regs = &pll->reg_setup;
+
+ if (pll->pll_configuration.enable_ssc) {
+ pr_debug("SSC is enabled\n");
+
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+ regs->ssc_stepsize_low);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+ regs->ssc_stepsize_high);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+ regs->ssc_div_per_low);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+ regs->ssc_div_per_high);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_LOW_1,
+ regs->ssc_adjper_low);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_DIV_ADJPER_HIGH_1,
+ regs->ssc_adjper_high);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_SSC_CONTROL,
+ SSC_EN | regs->ssc_control);
+ }
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll)
+{
+ void __iomem *base = pll->mmio;
+
+ pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_ONE, 0x80);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_DSM_DIVIDER, 0x00);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE,
+ 0xba);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_OUTDIV, 0x00);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x08);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_BAND_SET_RATE_1, 0xc0);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0xfa);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1,
+ 0x4c);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PFILT, 0x29);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_IFILT, 0x3f);
+}
+
+static void dsi_pll_commit(struct dsi_pll_10nm *pll)
+{
+ void __iomem *base = pll->mmio;
+ struct dsi_pll_regs *reg = &pll->reg_setup;
+
+ pll_write(base + REG_DSI_10nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1,
+ reg->decimal_div_start);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1,
+ reg->frac_div_start_low);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1,
+ reg->frac_div_start_mid);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1,
+ reg->frac_div_start_high);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_CMODE, 0x10);
+ pll_write(base + REG_DSI_10nm_PHY_PLL_CLOCK_INVERTERS,
+ reg->pll_clock_inverters);
+}
+
+static int dsi_pll_10nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+
+ DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_10nm->id, rate,
+ parent_rate);
+
+ pll_10nm->vco_current_rate = rate;
+ pll_10nm->vco_ref_clk_rate = parent_rate;
+
+ dsi_pll_setup_config(pll_10nm);
+
+ dsi_pll_calc_dec_frac(pll_10nm);
+
+ dsi_pll_calc_ssc(pll_10nm);
+
+ dsi_pll_commit(pll_10nm);
+
+ dsi_pll_config_hzindep_reg(pll_10nm);
+
+ dsi_pll_ssc_commit(pll_10nm);
+
+ /* flush, ensure all register writes are done */
+ wmb();
+
+ return 0;
+}
+
+static int dsi_pll_10nm_lock_status(struct dsi_pll_10nm *pll)
+{
+ int rc;
+ u32 status = 0;
+ u32 const delay_us = 100;
+ u32 const timeout_us = 5000;
+
+ rc = readl_poll_timeout_atomic(pll->mmio +
+ REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE,
+ status,
+ ((status & BIT(0)) > 0),
+ delay_us,
+ timeout_us);
+ if (rc)
+ pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+ pll->id, status);
+
+ return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_10nm *pll)
+{
+ u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0);
+ pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
+ data & ~BIT(5));
+ ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_10nm *pll)
+{
+ u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0);
+
+ pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CTRL_0,
+ data | BIT(5));
+ pll_write(pll->mmio + REG_DSI_10nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+ ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_10nm *pll)
+{
+ u32 data;
+
+ data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+ data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_10nm *pll)
+{
+ u32 data;
+
+ data = pll_read(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+ data | BIT(5));
+}
+
+static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ int rc;
+
+ dsi_pll_enable_pll_bias(pll_10nm);
+ if (pll_10nm->slave)
+ dsi_pll_enable_pll_bias(pll_10nm->slave);
+
+ /* Start PLL */
+ pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
+ 0x01);
+
+ /*
+ * ensure all PLL configurations are written prior to checking
+ * for PLL lock.
+ */
+ wmb();
+
+ /* Check for PLL lock */
+ rc = dsi_pll_10nm_lock_status(pll_10nm);
+ if (rc) {
+ pr_err("PLL(%d) lock failed\n", pll_10nm->id);
+ goto error;
+ }
+
+ pll->pll_on = true;
+
+ dsi_pll_enable_global_clk(pll_10nm);
+ if (pll_10nm->slave)
+ dsi_pll_enable_global_clk(pll_10nm->slave);
+
+ pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL,
+ 0x01);
+ if (pll_10nm->slave)
+ pll_write(pll_10nm->slave->phy_cmn_mmio +
+ REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0x01);
+
+error:
+ return rc;
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_10nm *pll)
+{
+ pll_write(pll->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_RBUF_CTRL, 0);
+ dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_10nm_vco_unprepare(struct clk_hw *hw)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+
+ /*
+ * To avoid any stray glitches while abruptly powering down the PLL,
+ * make sure to gate the clock using the clock enable bit before
+ * powering down the PLL.
+ */
+ dsi_pll_disable_global_clk(pll_10nm);
+ pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL, 0);
+ dsi_pll_disable_sub(pll_10nm);
+ if (pll_10nm->slave) {
+ dsi_pll_disable_global_clk(pll_10nm->slave);
+ dsi_pll_disable_sub(pll_10nm->slave);
+ }
+ /* flush, ensure all register writes are done */
+ wmb();
+ pll->pll_on = false;
+}
+
+static unsigned long dsi_pll_10nm_vco_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ void __iomem *base = pll_10nm->mmio;
+ u64 ref_clk = pll_10nm->vco_ref_clk_rate;
+ u64 vco_rate = 0x0;
+ u64 multiplier;
+ u32 frac;
+ u32 dec;
+ u64 pll_freq, tmp64;
+
+ dec = pll_read(base + REG_DSI_10nm_PHY_PLL_DECIMAL_DIV_START_1);
+ dec &= 0xff;
+
+ frac = pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+ frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+ 0xff) << 8);
+ frac |= ((pll_read(base + REG_DSI_10nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+ 0x3) << 16);
+
+ /*
+ * TODO:
+ * 1. Assumes prescaler is disabled
+ * 2. Multiplier is 2^18. It should be 2^(num_of_frac_bits).
+ */
+ multiplier = 1 << 18;
+ pll_freq = dec * (ref_clk * 2);
+ tmp64 = (ref_clk * 2 * frac);
+ pll_freq += div_u64(tmp64, multiplier);
+
+ vco_rate = pll_freq;
+
+ DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
+ pll_10nm->id, (unsigned long)vco_rate, dec, frac);
+
+ return (unsigned long)vco_rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_10nm_vco = {
+ .round_rate = msm_dsi_pll_helper_clk_round_rate,
+ .set_rate = dsi_pll_10nm_vco_set_rate,
+ .recalc_rate = dsi_pll_10nm_vco_recalc_rate,
+ .prepare = dsi_pll_10nm_vco_prepare,
+ .unprepare = dsi_pll_10nm_vco_unprepare,
+};
+
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_pll_10nm_save_state(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+ void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
+ u32 cmn_clk_cfg0, cmn_clk_cfg1;
+
+ cached->pll_out_div = pll_read(pll_10nm->mmio +
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+ cached->pll_out_div &= 0x3;
+
+ cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0);
+ cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
+ cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+
+ cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ cached->pll_mux = cmn_clk_cfg1 & 0x3;
+
+ DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
+ pll_10nm->id, cached->pll_out_div, cached->bit_clk_div,
+ cached->pix_clk_div, cached->pll_mux);
+}
+
+static int dsi_pll_10nm_restore_state(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct pll_10nm_cached_state *cached = &pll_10nm->cached_state;
+ void __iomem *phy_base = pll_10nm->phy_cmn_mmio;
+ u32 val;
+
+ val = pll_read(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE);
+ val &= ~0x3;
+ val |= cached->pll_out_div;
+ pll_write(pll_10nm->mmio + REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE, val);
+
+ pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ cached->bit_clk_div | (cached->pix_clk_div << 4));
+
+ val = pll_read(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1);
+ val &= ~0x3;
+ val |= cached->pll_mux;
+ pll_write(phy_base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, val);
+
+ DBG("DSI PLL%d", pll_10nm->id);
+
+ return 0;
+}
+
+static int dsi_pll_10nm_set_usecase(struct msm_dsi_pll *pll,
+ enum msm_dsi_phy_usecase uc)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ void __iomem *base = pll_10nm->phy_cmn_mmio;
+ u32 data = 0x0; /* internal PLL */
+
+ DBG("DSI PLL%d", pll_10nm->id);
+
+ switch (uc) {
+ case MSM_DSI_PHY_STANDALONE:
+ break;
+ case MSM_DSI_PHY_MASTER:
+ pll_10nm->slave = pll_10nm_list[(pll_10nm->id + 1) % DSI_MAX];
+ break;
+ case MSM_DSI_PHY_SLAVE:
+ data = 0x1; /* external PLL */
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set PLL src */
+ pll_write(base + REG_DSI_10nm_PHY_CMN_CLK_CFG1, (data << 2));
+
+ pll_10nm->uc = uc;
+
+ return 0;
+}
+
+static int dsi_pll_10nm_get_provider(struct msm_dsi_pll *pll,
+ struct clk **byte_clk_provider,
+ struct clk **pixel_clk_provider)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+ struct clk_hw_onecell_data *hw_data = pll_10nm->hw_data;
+
+ DBG("DSI PLL%d", pll_10nm->id);
+
+ if (byte_clk_provider)
+ *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
+ if (pixel_clk_provider)
+ *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
+
+ return 0;
+}
+
+static void dsi_pll_10nm_destroy(struct msm_dsi_pll *pll)
+{
+ struct dsi_pll_10nm *pll_10nm = to_pll_10nm(pll);
+
+ DBG("DSI PLL%d", pll_10nm->id);
+}
+
+/*
+ * The post dividers and mux clocks are created using the standard divider and
+ * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
+ * state to follow the master PLL's divider/mux state. Therefore, we don't
+ * require special clock ops that also configure the slave PLL registers.
+ */
+static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
+{
+ char clk_name[32], parent[32], vco_name[32];
+ char parent2[32], parent3[32], parent4[32];
+ struct clk_init_data vco_init = {
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+ .name = vco_name,
+ .flags = CLK_IGNORE_UNUSED,
+ .ops = &clk_ops_dsi_pll_10nm_vco,
+ };
+ struct device *dev = &pll_10nm->pdev->dev;
+ struct clk_hw **hws = pll_10nm->hws;
+ struct clk_hw_onecell_data *hw_data;
+ struct clk_hw *hw;
+ int num = 0;
+ int ret;
+
+ DBG("DSI%d", pll_10nm->id);
+
+ hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
+ NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
+ GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ snprintf(vco_name, 32, "dsi%dvco_clk", pll_10nm->id);
+ pll_10nm->base.clk_hw.init = &vco_init;
+
+ ret = clk_hw_register(dev, &pll_10nm->base.clk_hw);
+ if (ret)
+ return ret;
+
+ hws[num++] = &pll_10nm->base.clk_hw;
+
+ snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+ snprintf(parent, 32, "dsi%dvco_clk", pll_10nm->id);
+
+ hw = clk_hw_register_divider(dev, clk_name,
+ parent, CLK_SET_RATE_PARENT,
+ pll_10nm->mmio +
+ REG_DSI_10nm_PHY_PLL_PLL_OUTDIV_RATE,
+ 0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+
+ /* BIT CLK: DIV_CTRL_3_0 */
+ hw = clk_hw_register_divider(dev, clk_name, parent,
+ CLK_SET_RATE_PARENT,
+ pll_10nm->phy_cmn_mmio +
+ REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 0, 4, CLK_DIVIDER_ONE_BASED,
+ &pll_10nm->postdiv_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+
+ /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
+ hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+ CLK_SET_RATE_PARENT, 1, 8);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+ hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+
+ hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+ 0, 1, 2);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+
+ hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+ 0, 1, 4);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
+ snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_10nm->id);
+ snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_10nm->id);
+ snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_10nm->id);
+
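+ /*
+ * PCLK source mux ("dsiclk_sel" in the clock diagram above): bits [1:0]
+ * of CLK_CFG1 select between bit clk, bit clk / 2, out_div clk and
+ * post_out_div clk.
+ */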
+ hw = clk_hw_register_mux(dev, clk_name,
+ (const char *[]){
+ parent, parent2, parent3, parent4
+ }, 4, 0, pll_10nm->phy_cmn_mmio +
+ REG_DSI_10nm_PHY_CMN_CLK_CFG1,
+ 0, 2, 0, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+
+ snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id);
+ snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
+
+ /* PIX CLK DIV: DIV_CTRL_7_4 */
+ hw = clk_hw_register_divider(dev, clk_name, parent,
+ 0, pll_10nm->phy_cmn_mmio +
+ REG_DSI_10nm_PHY_CMN_CLK_CFG0,
+ 4, 4, CLK_DIVIDER_ONE_BASED,
+ &pll_10nm->postdiv_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hws[num++] = hw;
+ hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
+
+ pll_10nm->num_hws = num;
+
+ hw_data->num = NUM_PROVIDED_CLKS;
+ pll_10nm->hw_data = hw_data;
+
+ ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+ pll_10nm->hw_data);
+ if (ret) {
+ dev_err(dev, "failed to register clk provider: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
+{
+ struct dsi_pll_10nm *pll_10nm;
+ struct msm_dsi_pll *pll;
+ int ret;
+
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ pll_10nm = devm_kzalloc(&pdev->dev, sizeof(*pll_10nm), GFP_KERNEL);
+ if (!pll_10nm)
+ return ERR_PTR(-ENOMEM);
+
+ DBG("DSI PLL%d", id);
+
+ pll_10nm->pdev = pdev;
+ pll_10nm->id = id;
+ pll_10nm_list[id] = pll_10nm;
+
+ pll_10nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+ if (IS_ERR_OR_NULL(pll_10nm->phy_cmn_mmio)) {
+ dev_err(&pdev->dev, "failed to map CMN PHY base\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pll_10nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+ if (IS_ERR_OR_NULL(pll_10nm->mmio)) {
+ dev_err(&pdev->dev, "failed to map PLL base\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ pll = &pll_10nm->base;
+ pll->min_rate = 1000000000UL;
+ pll->max_rate = 3500000000UL;
+ pll->get_provider = dsi_pll_10nm_get_provider;
+ pll->destroy = dsi_pll_10nm_destroy;
+ pll->save_state = dsi_pll_10nm_save_state;
+ pll->restore_state = dsi_pll_10nm_restore_state;
+ pll->set_usecase = dsi_pll_10nm_set_usecase;
+
+ pll_10nm->vco_delay = 1;
+
+ ret = pll_10nm_register(pll_10nm);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register PLL: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ /* TODO: Remove this when we have proper display handover support */
+ msm_dsi_pll_save_state(pll);
+
+ return pll;
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
index 6e767979aab3..3656155e3793 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -769,7 +769,7 @@ static int msm_hdmi_hdcp_auth_part1_key_exchange(struct hdmi_hdcp_ctrl *hdcp_ctr
if (rc) {
pr_err("%s: wait key and an ready failed\n", __func__);
return rc;
- };
+ }
/* Read BCAPS and send to HDCP engine */
rc = msm_hdmi_hdcp_recv_bcaps(hdcp_ctrl);
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 1855182c76ce..ba74cb4f94df 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -161,8 +161,11 @@ int msm_debugfs_init(struct drm_minor *minor)
return ret;
}
- if (priv->kms->funcs->debugfs_init)
+ if (priv->kms->funcs->debugfs_init) {
ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
+ if (ret)
+ return ret;
+ }
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d90ef1d78a1b..30cd514d8f7c 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -660,7 +660,7 @@ static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
ret = msm_gem_cpu_prep(obj, args->op, &timeout);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -678,7 +678,7 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
ret = msm_gem_cpu_fini(obj);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -718,7 +718,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
args->offset = msm_gem_mmap_offset(obj);
}
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -783,7 +783,7 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
ret = 0;
}
- drm_gem_object_unreference(obj);
+ drm_gem_object_put(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 0a653dd2e618..48ed5b9a8580 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -51,7 +51,6 @@ struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
struct msm_fence_context;
-struct msm_fence_cb;
struct msm_gem_address_space;
struct msm_gem_vma;
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index fc175e724ad6..0e0c87252ab0 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -53,7 +53,7 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
for (i = 0; i < n; i++) {
struct drm_gem_object *bo = msm_fb->planes[i];
- drm_gem_object_unreference_unlocked(bo);
+ drm_gem_object_put_unlocked(bo);
}
kfree(msm_fb);
@@ -160,7 +160,7 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
out_unref:
for (i = 0; i < n; i++)
- drm_gem_object_unreference_unlocked(bos[i]);
+ drm_gem_object_put_unlocked(bos[i]);
return ERR_PTR(ret);
}
@@ -274,7 +274,7 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
- drm_gem_object_unreference_unlocked(bo);
+ drm_gem_object_put_unlocked(bo);
return ERR_CAST(fb);
}
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index 1aa6a4c6530c..b9fe059091f2 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -37,8 +37,6 @@ void msm_fence_context_free(struct msm_fence_context *fctx);
int msm_wait_fence(struct msm_fence_context *fctx, uint32_t fence,
ktime_t *timeout, bool interruptible);
-int msm_queue_fence_cb(struct msm_fence_context *fctx,
- struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 07376de9ff4c..95196479f651 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -470,7 +470,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
*offset = msm_gem_mmap_offset(obj);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
fail:
return ret;
@@ -798,6 +798,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
}
#endif
+/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
@@ -854,7 +855,7 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ret;
}
@@ -974,7 +975,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
return obj;
fail:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
@@ -1034,7 +1035,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
return obj;
fail:
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return ERR_PTR(ret);
}
@@ -1052,7 +1053,7 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
if (iova) {
ret = msm_gem_get_iova(obj, aspace, iova);
if (ret) {
- drm_gem_object_unreference(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(ret);
}
}
@@ -1060,7 +1061,7 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
vaddr = msm_gem_get_vaddr(obj);
if (IS_ERR(vaddr)) {
msm_gem_put_iova(obj, aspace);
- drm_gem_object_unreference(obj);
+ drm_gem_object_put(obj);
return ERR_CAST(vaddr);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 9320e184b48d..c5d9bd3e47a8 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -146,6 +146,7 @@ struct msm_gem_submit {
struct msm_gpu_submitqueue *queue;
struct pid *pid; /* submitting process */
bool valid; /* true if no cmdstream patching needed */
+ bool in_rb; /* "sudo" mode, copy cmds into RB */
struct msm_ringbuffer *ring;
unsigned int nr_cmds;
unsigned int nr_bos;
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index b8dc8f96caf2..7bd83e0afa97 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -430,6 +430,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
return -EINVAL;
+ if (args->flags & MSM_SUBMIT_SUDO) {
+ if (!IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) ||
+ !capable(CAP_SYS_RAWIO))
+ return -EINVAL;
+ }
+
queue = msm_submitqueue_get(ctx, args->queueid);
if (!queue)
return -ENOENT;
@@ -471,6 +477,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out_unlock;
}
+ if (args->flags & MSM_SUBMIT_SUDO)
+ submit->in_rb = true;
+
ret = submit_lookup_objects(submit, args, file);
if (ret)
goto out;
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index d34e331554f3..ffbec224551b 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -96,6 +96,8 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name)
{
struct msm_gem_address_space *aspace;
+ u64 size = domain->geometry.aperture_end -
+ domain->geometry.aperture_start;
aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
if (!aspace)
@@ -106,7 +108,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
aspace->mmu = msm_iommu_new(dev, domain);
drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
- (domain->geometry.aperture_end >> PAGE_SHIFT) - 1);
+ size >> PAGE_SHIFT);
kref_init(&aspace->kref);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index bd376f9e18a7..1c09acfb4028 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -552,7 +552,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base);
msm_gem_put_iova(&msm_obj->base, gpu->aspace);
- drm_gem_object_unreference(&msm_obj->base);
+ drm_gem_object_put(&msm_obj->base);
}
pm_runtime_mark_last_busy(&gpu->pdev->dev);
@@ -634,7 +634,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
/* submit takes a reference to the bo and iova until retired: */
- drm_gem_object_reference(&msm_obj->base);
+ drm_gem_object_get(&msm_obj->base);
msm_gem_get_iova(&msm_obj->base,
submit->gpu->aspace, &iova);
@@ -682,8 +682,10 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks,
GFP_KERNEL);
- if (!gpu->grp_clks)
+ if (!gpu->grp_clks) {
+ gpu->nr_clocks = 0;
return -ENOMEM;
+ }
of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
gpu->grp_clks[i] = get_clock(dev, name);
@@ -865,7 +867,7 @@ fail:
if (gpu->memptrs_bo) {
msm_gem_put_vaddr(gpu->memptrs_bo);
msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ drm_gem_object_put_unlocked(gpu->memptrs_bo);
}
platform_set_drvdata(pdev, NULL);
@@ -888,7 +890,7 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
if (gpu->memptrs_bo) {
msm_gem_put_vaddr(gpu->memptrs_bo);
msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
- drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ drm_gem_object_put_unlocked(gpu->memptrs_bo);
}
if (!IS_ERR_OR_NULL(gpu->aspace)) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index fccfccd303af..b8241179175a 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -65,6 +65,8 @@ struct msm_gpu_funcs {
#ifdef CONFIG_DEBUG_FS
/* show GPU status in debugfs: */
void (*show)(struct msm_gpu *gpu, struct seq_file *m);
+ /* for generation specific debugfs: */
+ int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value);
};
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 6ca98da35f63..6f5295b3f2f6 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -76,7 +76,7 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
if (ring->bo) {
msm_gem_put_iova(ring->bo, ring->gpu->aspace);
msm_gem_put_vaddr(ring->bo);
- drm_gem_object_unreference_unlocked(ring->bo);
+ drm_gem_object_put_unlocked(ring->bo);
}
kfree(ring);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 3e293029e3a6..bbbf353682e1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -510,37 +510,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
return 0;
}
-#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
-
-static void
-nouveau_get_hdmi_dev(struct nouveau_drm *drm)
-{
- struct pci_dev *pdev = drm->dev->pdev;
-
- if (!pdev) {
- NV_DEBUG(drm, "not a PCI device; no HDMI\n");
- drm->hdmi_device = NULL;
- return;
- }
-
- /* subfunction one is a hdmi audio device? */
- drm->hdmi_device = pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
- (unsigned int)pdev->bus->number,
- PCI_DEVFN(PCI_SLOT(pdev->devfn), 1));
-
- if (!drm->hdmi_device) {
- NV_DEBUG(drm, "hdmi device not found %d %d %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), 1);
- return;
- }
-
- if ((drm->hdmi_device->class >> 8) != PCI_CLASS_MULTIMEDIA_HD_AUDIO) {
- NV_DEBUG(drm, "possible hdmi device not audio %d\n", drm->hdmi_device->class);
- pci_dev_put(drm->hdmi_device);
- drm->hdmi_device = NULL;
- return;
- }
-}
-
static int
nouveau_drm_load(struct drm_device *dev, unsigned long flags)
{
@@ -568,8 +537,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&drm->clients);
spin_lock_init(&drm->tile.lock);
- nouveau_get_hdmi_dev(drm);
-
/* workaround an odd issue on nvc1 by disabling the device's
* nosnoop capability. hopefully won't cause issues until a
* better fix is found - assuming there is one...
@@ -655,8 +622,6 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_ttm_fini(drm);
nouveau_vga_fini(drm);
- if (drm->hdmi_device)
- pci_dev_put(drm->hdmi_device);
nouveau_cli_fini(&drm->client);
nouveau_cli_fini(&drm->master);
kfree(drm);
@@ -856,7 +821,6 @@ nouveau_pmops_runtime_suspend(struct device *dev)
}
drm_kms_helper_poll_disable(drm_dev);
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
nouveau_switcheroo_optimus_dsm();
ret = nouveau_do_suspend(drm_dev, true);
pci_save_state(pdev);
@@ -891,7 +855,6 @@ nouveau_pmops_runtime_resume(struct device *dev)
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
/* Monitors may have been connected / disconnected during suspend */
@@ -913,15 +876,6 @@ nouveau_pmops_runtime_idle(struct device *dev)
return -EBUSY;
}
- /* if we have a hdmi audio device - make sure it has a driver loaded */
- if (drm->hdmi_device) {
- if (!drm->hdmi_device->driver) {
- DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
- pm_runtime_mark_last_busy(dev);
- return -EBUSY;
- }
- }
-
list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
if (crtc->enabled) {
DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 96f6bd8aee5d..881b44b89a01 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -208,7 +208,6 @@ struct nouveau_drm {
bool have_disp_power_ref;
struct dev_pm_domain vga_pm_domain;
- struct pci_dev *hdmi_device;
};
static inline struct nouveau_drm *
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 95ea6abae914..9eabd7201a12 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -40,14 +40,12 @@ static const struct videomode tvc_pal_vm = {
DISPLAY_FLAGS_VSYNC_LOW,
};
-static const struct of_device_id tvc_of_match[];
-
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
static int tvc_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
dev_dbg(ddata->dev, "connect\n");
@@ -55,10 +53,19 @@ static int tvc_connect(struct omap_dss_device *dssdev)
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(ddata->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(ddata->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.atv->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -73,6 +80,9 @@ static void tvc_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.atv->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int tvc_enable(struct omap_dss_device *dssdev)
@@ -175,32 +185,12 @@ static struct omap_dss_driver tvc_driver = {
.set_wss = tvc_set_wss,
};
-static int tvc_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- struct omap_dss_device *in;
-
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
- return 0;
-}
-
static int tvc_probe(struct platform_device *pdev)
{
struct panel_drv_data *ddata;
struct omap_dss_device *dssdev;
int r;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
@@ -208,10 +198,6 @@ static int tvc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->dev = &pdev->dev;
- r = tvc_probe_of(pdev);
- if (r)
- return r;
-
ddata->vm = tvc_pal_vm;
dssdev = &ddata->dssdev;
@@ -224,28 +210,22 @@ static int tvc_probe(struct platform_device *pdev)
r = omapdss_register_display(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register panel\n");
- goto err_reg;
+ return r;
}
return 0;
-err_reg:
- omap_dss_put_device(ddata->in);
- return r;
}
static int __exit tvc_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_display(&ddata->dssdev);
tvc_disable(dssdev);
tvc_disconnect(dssdev);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 10b4b97d3595..6d8cbd9e2110 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -9,6 +9,7 @@
* the Free Software Foundation.
*/
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -44,6 +45,14 @@ struct panel_drv_data {
struct videomode vm;
struct i2c_adapter *i2c_adapter;
+
+ struct gpio_desc *hpd_gpio;
+
+ void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
+ void *hpd_cb_data;
+ bool hpd_enabled;
+ /* mutex for hpd fields above */
+ struct mutex hpd_lock;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
@@ -51,16 +60,25 @@ struct panel_drv_data {
static int dvic_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dvi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -73,6 +91,9 @@ static void dvic_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.dvi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int dvic_enable(struct omap_dss_device *dssdev)
@@ -177,6 +198,9 @@ static int dvic_read_edid(struct omap_dss_device *dssdev,
struct panel_drv_data *ddata = to_panel_data(dssdev);
int r, l, bytes_read;
+ if (ddata->hpd_gpio && !gpiod_get_value_cansleep(ddata->hpd_gpio))
+ return -ENODEV;
+
if (!ddata->i2c_adapter)
return -ENODEV;
@@ -208,6 +232,9 @@ static bool dvic_detect(struct omap_dss_device *dssdev)
unsigned char out;
int r;
+ if (ddata->hpd_gpio)
+ return gpiod_get_value_cansleep(ddata->hpd_gpio);
+
if (!ddata->i2c_adapter)
return true;
@@ -216,6 +243,60 @@ static bool dvic_detect(struct omap_dss_device *dssdev)
return r == 0;
}
+static int dvic_register_hpd_cb(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
+ enum drm_connector_status status),
+ void *cb_data)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ if (!ddata->hpd_gpio)
+ return -ENOTSUPP;
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = cb;
+ ddata->hpd_cb_data = cb_data;
+ mutex_unlock(&ddata->hpd_lock);
+ return 0;
+}
+
+static void dvic_unregister_hpd_cb(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ if (!ddata->hpd_gpio)
+ return;
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = NULL;
+ ddata->hpd_cb_data = NULL;
+ mutex_unlock(&ddata->hpd_lock);
+}
+
+static void dvic_enable_hpd(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ if (!ddata->hpd_gpio)
+ return;
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_enabled = true;
+ mutex_unlock(&ddata->hpd_lock);
+}
+
+static void dvic_disable_hpd(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
+
+ if (!ddata->hpd_gpio)
+ return;
+
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_enabled = false;
+ mutex_unlock(&ddata->hpd_lock);
+}
+
static struct omap_dss_driver dvic_driver = {
.connect = dvic_connect,
.disconnect = dvic_disconnect,
@@ -229,23 +310,60 @@ static struct omap_dss_driver dvic_driver = {
.read_edid = dvic_read_edid,
.detect = dvic_detect,
+
+ .register_hpd_cb = dvic_register_hpd_cb,
+ .unregister_hpd_cb = dvic_unregister_hpd_cb,
+ .enable_hpd = dvic_enable_hpd,
+ .disable_hpd = dvic_disable_hpd,
};
+static irqreturn_t dvic_hpd_isr(int irq, void *data)
+{
+ struct panel_drv_data *ddata = data;
+
+ mutex_lock(&ddata->hpd_lock);
+ if (ddata->hpd_enabled && ddata->hpd_cb) {
+ enum drm_connector_status status;
+
+ if (dvic_detect(&ddata->dssdev))
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ ddata->hpd_cb(ddata->hpd_cb_data, status);
+ }
+ mutex_unlock(&ddata->hpd_lock);
+
+ return IRQ_HANDLED;
+}
+
static int dvic_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
- struct omap_dss_device *in;
struct device_node *adapter_node;
struct i2c_adapter *adapter;
+ struct gpio_desc *gpio;
+ int r;
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
+ gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to parse HPD gpio\n");
+ return PTR_ERR(gpio);
}
- ddata->in = in;
+ ddata->hpd_gpio = gpio;
+
+ mutex_init(&ddata->hpd_lock);
+
+ if (ddata->hpd_gpio) {
+ r = devm_request_threaded_irq(&pdev->dev,
+ gpiod_to_irq(ddata->hpd_gpio), NULL, dvic_hpd_isr,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "DVI HPD", ddata);
+ if (r)
+ return r;
+ }
adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
if (adapter_node) {
@@ -253,7 +371,6 @@ static int dvic_probe_of(struct platform_device *pdev)
of_node_put(adapter_node);
if (adapter == NULL) {
dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
- omap_dss_put_device(ddata->in);
return -EPROBE_DEFER;
}
@@ -275,9 +392,6 @@ static int dvic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- if (!pdev->dev.of_node)
- return -ENODEV;
-
r = dvic_probe_of(pdev);
if (r)
return r;
@@ -300,9 +414,8 @@ static int dvic_probe(struct platform_device *pdev)
return 0;
err_reg:
- omap_dss_put_device(ddata->in);
-
i2c_put_adapter(ddata->i2c_adapter);
+ mutex_destroy(&ddata->hpd_lock);
return r;
}
@@ -311,17 +424,16 @@ static int __exit dvic_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_display(&ddata->dssdev);
dvic_disable(dssdev);
dvic_disconnect(dssdev);
- omap_dss_put_device(in);
-
i2c_put_adapter(ddata->i2c_adapter);
+ mutex_destroy(&ddata->hpd_lock);
+
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 2867476419dc..ca30ed9da7eb 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -55,7 +55,7 @@ struct panel_drv_data {
static int hdmic_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
dev_dbg(ddata->dev, "connect\n");
@@ -63,10 +63,19 @@ static int hdmic_connect(struct omap_dss_device *dssdev)
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(ddata->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(ddata->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.hdmi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -81,6 +90,9 @@ static void hdmic_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.hdmi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int hdmic_enable(struct omap_dss_device *dssdev)
@@ -302,7 +314,6 @@ static int hdmic_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
- struct omap_dss_device *in;
int gpio;
/* HPD GPIO */
@@ -312,14 +323,6 @@ static int hdmic_probe_of(struct platform_device *pdev)
else
ddata->hpd_gpio = -ENODEV;
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
return 0;
}
@@ -336,9 +339,6 @@ static int hdmic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->dev = &pdev->dev;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
r = hdmic_probe_of(pdev);
if (r)
return r;
@@ -349,7 +349,7 @@ static int hdmic_probe(struct platform_device *pdev)
r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
GPIOF_DIR_IN, "hdmi_hpd");
if (r)
- goto err_reg;
+ return r;
r = devm_request_threaded_irq(&pdev->dev,
gpio_to_irq(ddata->hpd_gpio),
@@ -358,7 +358,7 @@ static int hdmic_probe(struct platform_device *pdev)
IRQF_ONESHOT,
"hdmic hpd", ddata);
if (r)
- goto err_reg;
+ return r;
}
ddata->vm = hdmic_default_vm;
@@ -373,28 +373,22 @@ static int hdmic_probe(struct platform_device *pdev)
r = omapdss_register_display(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register panel\n");
- goto err_reg;
+ return r;
}
return 0;
-err_reg:
- omap_dss_put_device(ddata->in);
- return r;
}
static int __exit hdmic_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_display(&ddata->dssdev);
hdmic_disable(dssdev);
hdmic_disconnect(dssdev);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index d523c67a3ae3..afee1b8b457a 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -36,7 +36,7 @@ static int opa362_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
dev_dbg(dssdev->dev, "connect\n");
@@ -44,13 +44,22 @@ static int opa362_connect(struct omap_dss_device *dssdev,
if (omapdss_device_is_connected(dssdev))
return -EBUSY;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.atv->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
dst->src = dssdev;
dssdev->dst = dst;
+ ddata->in = in;
return 0;
}
@@ -74,6 +83,9 @@ static void opa362_disconnect(struct omap_dss_device *dssdev,
dssdev->dst = NULL;
in->ops.atv->disconnect(in, &ddata->dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int opa362_enable(struct omap_dss_device *dssdev)
@@ -171,19 +183,13 @@ static const struct omapdss_atv_ops opa362_atv_ops = {
static int opa362_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev, *in;
+ struct omap_dss_device *dssdev;
struct gpio_desc *gpio;
int r;
dev_dbg(&pdev->dev, "probe\n");
- if (node == NULL) {
- dev_err(&pdev->dev, "Unable to find device tree\n");
- return -EINVAL;
- }
-
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
return -ENOMEM;
@@ -196,14 +202,6 @@ static int opa362_probe(struct platform_device *pdev)
ddata->enable_gpio = gpio;
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
dssdev = &ddata->dssdev;
dssdev->ops.atv = &opa362_atv_ops;
dssdev->dev = &pdev->dev;
@@ -214,20 +212,16 @@ static int opa362_probe(struct platform_device *pdev)
r = omapdss_register_output(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register output\n");
- goto err_reg;
+ return r;
}
return 0;
-err_reg:
- omap_dss_put_device(ddata->in);
- return r;
}
static int __exit opa362_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_output(&ddata->dssdev);
@@ -239,8 +233,6 @@ static int __exit opa362_remove(struct platform_device *pdev)
if (omapdss_device_is_connected(dssdev))
opa362_disconnect(dssdev, dssdev->dst);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index e01ab3db6d86..ed7ae384c3ed 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -32,19 +32,28 @@ static int tfp410_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return -EBUSY;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dpi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
dst->src = dssdev;
dssdev->dst = dst;
+ ddata->in = in;
return 0;
}
@@ -66,6 +75,9 @@ static void tfp410_disconnect(struct omap_dss_device *dssdev,
dssdev->dst = NULL;
in->ops.dpi->disconnect(in, &ddata->dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int tfp410_enable(struct omap_dss_device *dssdev)
@@ -165,7 +177,6 @@ static int tfp410_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
- struct omap_dss_device *in;
int gpio;
gpio = of_get_named_gpio(node, "powerdown-gpios", 0);
@@ -178,14 +189,6 @@ static int tfp410_probe_of(struct platform_device *pdev)
return gpio;
}
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
return 0;
}
@@ -201,9 +204,6 @@ static int tfp410_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- if (!pdev->dev.of_node)
- return -ENODEV;
-
r = tfp410_probe_of(pdev);
if (r)
return r;
@@ -214,7 +214,7 @@ static int tfp410_probe(struct platform_device *pdev)
if (r) {
dev_err(&pdev->dev, "Failed to request PD GPIO %d\n",
ddata->pd_gpio);
- goto err_gpio;
+ return r;
}
}
@@ -229,21 +229,16 @@ static int tfp410_probe(struct platform_device *pdev)
r = omapdss_register_output(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register output\n");
- goto err_reg;
+ return r;
}
return 0;
-err_reg:
-err_gpio:
- omap_dss_put_device(ddata->in);
- return r;
}
static int __exit tfp410_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_output(&ddata->dssdev);
@@ -255,8 +250,6 @@ static int __exit tfp410_remove(struct platform_device *pdev)
if (omapdss_device_is_connected(dssdev))
tfp410_disconnect(dssdev, dssdev->dst);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 1fd493e5fa3d..d275bf152da5 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -40,12 +40,20 @@ static int tpd_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.hdmi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
dst->src = dssdev;
dssdev->dst = dst;
@@ -56,6 +64,7 @@ static int tpd_connect(struct omap_dss_device *dssdev,
/* DC-DC converter needs at max 300us to get to 90% of 5V */
udelay(300);
+ ddata->in = in;
return 0;
}
@@ -77,6 +86,9 @@ static void tpd_disconnect(struct omap_dss_device *dssdev,
dssdev->dst = NULL;
in->ops.hdmi->disconnect(in, &ddata->dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int tpd_enable(struct omap_dss_device *dssdev)
@@ -269,23 +281,6 @@ static irqreturn_t tpd_hpd_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int tpd_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- struct omap_dss_device *in;
-
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
- return 0;
-}
-
static int tpd_probe(struct platform_device *pdev)
{
struct omap_dss_device *in, *dssdev;
@@ -299,37 +294,24 @@ static int tpd_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- if (!pdev->dev.of_node)
- return -ENODEV;
-
- r = tpd_probe_of(pdev);
- if (r)
- return r;
-
gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
GPIOD_OUT_LOW);
- if (IS_ERR(gpio)) {
- r = PTR_ERR(gpio);
- goto err_gpio;
- }
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
ddata->ct_cp_hpd_gpio = gpio;
gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
GPIOD_OUT_LOW);
- if (IS_ERR(gpio)) {
- r = PTR_ERR(gpio);
- goto err_gpio;
- }
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
ddata->ls_oe_gpio = gpio;
gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2,
GPIOD_IN);
- if (IS_ERR(gpio)) {
- r = PTR_ERR(gpio);
- goto err_gpio;
- }
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
ddata->hpd_gpio = gpio;
@@ -340,7 +322,7 @@ static int tpd_probe(struct platform_device *pdev)
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"tpd12s015 hpd", ddata);
if (r)
- goto err_gpio;
+ return r;
dssdev = &ddata->dssdev;
dssdev->ops.hdmi = &tpd_hdmi_ops;
@@ -355,21 +337,16 @@ static int tpd_probe(struct platform_device *pdev)
r = omapdss_register_output(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register output\n");
- goto err_reg;
+ return r;
}
return 0;
-err_reg:
-err_gpio:
- omap_dss_put_device(ddata->in);
- return r;
}
static int __exit tpd_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_output(&ddata->dssdev);
@@ -381,8 +358,6 @@ static int __exit tpd_remove(struct platform_device *pdev)
if (omapdss_device_is_connected(dssdev))
tpd_disconnect(dssdev, dssdev->dst);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 48a03f55610a..6cbf570d6727 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -38,16 +38,25 @@ struct panel_drv_data {
static int panel_dpi_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dpi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -60,6 +69,9 @@ static void panel_dpi_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.dpi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int panel_dpi_enable(struct omap_dss_device *dssdev)
@@ -157,7 +169,6 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct device_node *node = pdev->dev.of_node;
- struct omap_dss_device *in;
int r;
struct display_timing timing;
struct gpio_desc *gpio;
@@ -195,14 +206,6 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
videomode_from_timing(&timing, &ddata->vm);
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
return 0;
}
@@ -212,9 +215,6 @@ static int panel_dpi_probe(struct platform_device *pdev)
struct omap_dss_device *dssdev;
int r;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (ddata == NULL)
return -ENOMEM;
@@ -235,29 +235,22 @@ static int panel_dpi_probe(struct platform_device *pdev)
r = omapdss_register_display(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register panel\n");
- goto err_reg;
+ return r;
}
return 0;
-
-err_reg:
- omap_dss_put_device(ddata->in);
- return r;
}
static int __exit panel_dpi_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_display(dssdev);
panel_dpi_disable(dssdev);
panel_dpi_disconnect(dssdev);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 15399a1a666b..428de90fced1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -86,7 +86,7 @@ struct panel_drv_data {
struct workqueue_struct *workqueue;
bool ulps_enabled;
- unsigned ulps_timeout;
+ unsigned int ulps_timeout;
struct delayed_work ulps_work;
};
@@ -513,7 +513,7 @@ static ssize_t dsicm_show_ulps(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- unsigned t;
+ unsigned int t;
mutex_lock(&ddata->lock);
t = ddata->ulps_enabled;
@@ -560,7 +560,7 @@ static ssize_t dsicm_show_ulps_timeout(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- unsigned t;
+ unsigned int t;
mutex_lock(&ddata->lock);
t = ddata->ulps_timeout;
@@ -759,37 +759,46 @@ static int dsicm_panel_reset(struct panel_drv_data *ddata)
static int dsicm_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
struct device *dev = &ddata->pdev->dev;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dsi->connect(in, dssdev);
if (r) {
dev_err(dev, "Failed to connect to video source\n");
- return r;
+ goto err_connect;
}
- r = in->ops.dsi->request_vc(ddata->in, &ddata->channel);
+ r = in->ops.dsi->request_vc(in, &ddata->channel);
if (r) {
dev_err(dev, "failed to get virtual channel\n");
goto err_req_vc;
}
- r = in->ops.dsi->set_vc_id(ddata->in, ddata->channel, TCH);
+ r = in->ops.dsi->set_vc_id(in, ddata->channel, TCH);
if (r) {
dev_err(dev, "failed to set VC_ID\n");
goto err_vc_id;
}
+ ddata->in = in;
return 0;
err_vc_id:
- in->ops.dsi->release_vc(ddata->in, ddata->channel);
+ in->ops.dsi->release_vc(in, ddata->channel);
err_req_vc:
in->ops.dsi->disconnect(in, dssdev);
+err_connect:
+ omap_dss_put_device(in);
return r;
}
@@ -803,6 +812,9 @@ static void dsicm_disconnect(struct omap_dss_device *dssdev)
in->ops.dsi->release_vc(in, ddata->channel);
in->ops.dsi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int dsicm_enable(struct omap_dss_device *dssdev)
@@ -1064,7 +1076,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
int r;
int first = 1;
int plen;
- unsigned buf_used = 0;
+ unsigned int buf_used = 0;
if (size < w * h * 3)
return -ENOMEM;
@@ -1223,7 +1235,6 @@ static int dsicm_probe_of(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct device_node *backlight;
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *in;
struct display_timing timing;
int err;
@@ -1259,12 +1270,6 @@ static int dsicm_probe_of(struct platform_device *pdev)
ddata->height_mm = 0;
of_property_read_u32(node, "height-mm", &ddata->height_mm);
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
ddata->vpnl = devm_regulator_get_optional(&pdev->dev, "vpnl");
if (IS_ERR(ddata->vpnl)) {
err = PTR_ERR(ddata->vpnl);
@@ -1281,8 +1286,6 @@ static int dsicm_probe_of(struct platform_device *pdev)
ddata->vddi = NULL;
}
- ddata->in = in;
-
backlight = of_parse_phandle(node, "backlight", 0);
if (backlight) {
ddata->extbldev = of_find_backlight_by_node(backlight);
@@ -1317,9 +1320,6 @@ static int dsicm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->pdev = pdev;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
ddata->vm.hactive = 864;
ddata->vm.vactive = 480;
ddata->vm.pixelclock = 864 * 480 * 60;
@@ -1424,8 +1424,6 @@ static int __exit dsicm_remove(struct platform_device *pdev)
if (ddata->extbldev)
put_device(&ddata->extbldev->dev);
- omap_dss_put_device(ddata->in);
-
dsicm_cancel_ulps_work(ddata);
destroy_workqueue(ddata->workqueue);
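
For the DSI command-mode panel the connect path acquires more than one resource, so the dsicm_connect() hunk above unwinds in reverse order before dropping the source reference. A condensed sketch of that unwind ladder, reusing the names from the hunk; the example_ wrapper and its argument list are illustrative:

static int example_dsi_connect(struct panel_drv_data *ddata,
			       struct omap_dss_device *dssdev,
			       struct omap_dss_device *in)
{
	int r;

	r = in->ops.dsi->connect(in, dssdev);
	if (r)
		goto err_connect;

	r = in->ops.dsi->request_vc(in, &ddata->channel);
	if (r)
		goto err_req_vc;

	r = in->ops.dsi->set_vc_id(in, ddata->channel, TCH);
	if (r)
		goto err_vc_id;

	ddata->in = in;
	return 0;

	/* Each label undoes only the steps that already succeeded. */
err_vc_id:
	in->ops.dsi->release_vc(in, ddata->channel);
err_req_vc:
	in->ops.dsi->disconnect(in, dssdev);
err_connect:
	omap_dss_put_device(in);	/* balances the connect-time lookup */
	return r;
}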
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 57af22ce87c5..754197099440 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -119,18 +119,27 @@ static void init_lb035q02_panel(struct spi_device *spi)
static int lb035q02_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dpi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
init_lb035q02_panel(ddata->spi);
+ ddata->in = in;
return 0;
}
@@ -143,6 +152,9 @@ static void lb035q02_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.dpi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int lb035q02_enable(struct omap_dss_device *dssdev)
@@ -230,9 +242,7 @@ static struct omap_dss_driver lb035q02_ops = {
static int lb035q02_probe_of(struct spi_device *spi)
{
- struct device_node *node = spi->dev.of_node;
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *in;
struct gpio_desc *gpio;
gpio = devm_gpiod_get(&spi->dev, "enable", GPIOD_OUT_LOW);
@@ -243,14 +253,6 @@ static int lb035q02_probe_of(struct spi_device *spi)
ddata->enable_gpio = gpio;
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&spi->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
return 0;
}
@@ -268,9 +270,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
ddata->spi = spi;
- if (!spi->dev.of_node)
- return -ENODEV;
-
r = lb035q02_probe_of(spi);
if (r)
return r;
@@ -287,29 +286,22 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
r = omapdss_register_display(dssdev);
if (r) {
dev_err(&spi->dev, "Failed to register panel\n");
- goto err_reg;
+ return r;
}
return 0;
-
-err_reg:
- omap_dss_put_device(ddata->in);
- return r;
}
static int lb035q02_panel_spi_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_display(dssdev);
lb035q02_disable(dssdev);
lb035q02_disconnect(dssdev);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index bf53676263ad..9a3b27fa5cb5 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -115,16 +115,25 @@ static int init_nec_8048_wvga_lcd(struct spi_device *spi)
static int nec_8048_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dpi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -137,6 +146,9 @@ static void nec_8048_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.dpi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int nec_8048_enable(struct omap_dss_device *dssdev)
@@ -226,7 +238,6 @@ static int nec_8048_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *in;
int gpio;
gpio = of_get_named_gpio(node, "reset-gpios", 0);
@@ -239,14 +250,6 @@ static int nec_8048_probe_of(struct spi_device *spi)
/* XXX the panel spec doesn't mention any QVGA pin?? */
ddata->qvga_gpio = -ENOENT;
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&spi->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
return 0;
}
@@ -277,9 +280,6 @@ static int nec_8048_probe(struct spi_device *spi)
ddata->spi = spi;
- if (!spi->dev.of_node)
- return -ENODEV;
-
r = nec_8048_probe_of(spi);
if (r)
return r;
@@ -288,14 +288,14 @@ static int nec_8048_probe(struct spi_device *spi)
r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio,
GPIOF_OUT_INIT_HIGH, "lcd QVGA");
if (r)
- goto err_gpio;
+ return r;
}
if (gpio_is_valid(ddata->res_gpio)) {
r = devm_gpio_request_one(&spi->dev, ddata->res_gpio,
GPIOF_OUT_INIT_LOW, "lcd RES");
if (r)
- goto err_gpio;
+ return r;
}
ddata->vm = nec_8048_panel_vm;
@@ -310,22 +310,16 @@ static int nec_8048_probe(struct spi_device *spi)
r = omapdss_register_display(dssdev);
if (r) {
dev_err(&spi->dev, "Failed to register panel\n");
- goto err_reg;
+ return r;
}
return 0;
-
-err_reg:
-err_gpio:
- omap_dss_put_device(ddata->in);
- return r;
}
static int nec_8048_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
@@ -334,8 +328,6 @@ static int nec_8048_remove(struct spi_device *spi)
nec_8048_disable(dssdev);
nec_8048_disconnect(dssdev);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 34555801fa4c..bb5b680cabfe 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -61,16 +61,25 @@ static const struct videomode sharp_ls_vm = {
static int sharp_ls_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dpi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -83,6 +92,9 @@ static void sharp_ls_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.dpi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int sharp_ls_enable(struct omap_dss_device *dssdev)
@@ -210,8 +222,6 @@ static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
static int sharp_ls_probe_of(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- struct omap_dss_device *in;
int r;
ddata->vcc = devm_regulator_get(&pdev->dev, "envdd");
@@ -245,14 +255,6 @@ static int sharp_ls_probe_of(struct platform_device *pdev)
if (r)
return r;
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&pdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
return 0;
}
@@ -268,9 +270,6 @@ static int sharp_ls_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- if (!pdev->dev.of_node)
- return -ENODEV;
-
r = sharp_ls_probe_of(pdev);
if (r)
return r;
@@ -287,29 +286,22 @@ static int sharp_ls_probe(struct platform_device *pdev)
r = omapdss_register_display(dssdev);
if (r) {
dev_err(&pdev->dev, "Failed to register panel\n");
- goto err_reg;
+ return r;
}
return 0;
-
-err_reg:
- omap_dss_put_device(ddata->in);
- return r;
}
static int __exit sharp_ls_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
omapdss_unregister_display(dssdev);
sharp_ls_disable(dssdev);
sharp_ls_disconnect(dssdev);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index 8e5bff4e5226..92fe125ce22e 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -216,12 +216,12 @@ static void set_display_state(struct panel_drv_data *ddata, int enabled)
static int panel_enabled(struct panel_drv_data *ddata)
{
+ __be32 v;
u32 disp_status;
int enabled;
- acx565akm_read(ddata, MIPID_CMD_READ_DISP_STATUS,
- (u8 *)&disp_status, 4);
- disp_status = __be32_to_cpu(disp_status);
+ acx565akm_read(ddata, MIPID_CMD_READ_DISP_STATUS, (u8 *)&v, 4);
+ disp_status = __be32_to_cpu(v);
enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10));
dev_dbg(&ddata->spi->dev,
"LCD panel %senabled by bootloader (status 0x%04x)\n",
@@ -289,7 +289,7 @@ static void enable_backlight_ctrl(struct panel_drv_data *ddata, int enable)
acx565akm_write(ddata, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2);
}
-static void set_cabc_mode(struct panel_drv_data *ddata, unsigned mode)
+static void set_cabc_mode(struct panel_drv_data *ddata, unsigned int mode)
{
u16 cabc_ctrl;
@@ -303,12 +303,12 @@ static void set_cabc_mode(struct panel_drv_data *ddata, unsigned mode)
acx565akm_write(ddata, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
}
-static unsigned get_cabc_mode(struct panel_drv_data *ddata)
+static unsigned int get_cabc_mode(struct panel_drv_data *ddata)
{
return ddata->cabc_mode;
}
-static unsigned get_hw_cabc_mode(struct panel_drv_data *ddata)
+static unsigned int get_hw_cabc_mode(struct panel_drv_data *ddata)
{
u8 cabc_ctrl;
@@ -510,16 +510,25 @@ static const struct attribute_group bldev_attr_group = {
static int acx565akm_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.sdi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -532,6 +541,9 @@ static void acx565akm_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.sdi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
@@ -700,12 +712,6 @@ static int acx565akm_probe_of(struct spi_device *spi)
ddata->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0);
- ddata->in = omapdss_of_find_source_for_first_ep(np);
- if (IS_ERR(ddata->in)) {
- dev_err(&spi->dev, "failed to find video source\n");
- return PTR_ERR(ddata->in);
- }
-
return 0;
}
@@ -720,9 +726,6 @@ static int acx565akm_probe(struct spi_device *spi)
dev_dbg(&spi->dev, "%s\n", __func__);
- if (!spi->dev.of_node)
- return -ENODEV;
-
spi->mode = SPI_MODE_3;
ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
@@ -826,7 +829,6 @@ err_sysfs:
err_reg_bl:
err_detect:
err_gpio:
- omap_dss_put_device(ddata->in);
return r;
}
@@ -834,7 +836,6 @@ static int acx565akm_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
@@ -846,8 +847,6 @@ static int acx565akm_remove(struct spi_device *spi)
acx565akm_disable(dssdev);
acx565akm_disconnect(dssdev);
- omap_dss_put_device(in);
-
return 0;
}
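
The panel_enabled() hunk above is a type-annotation fix rather than a behavioural change: the status word read back from the panel is big-endian on the wire, so it is read into a __be32 and converted once into a plain u32. A minimal sketch, assuming the same acx565akm_read() helper and command constant as in the patch; the example function itself is illustrative:

static u32 example_read_status(struct panel_drv_data *ddata)
{
	__be32 raw;

	/* Raw value is big-endian; keep only the CPU-order result. */
	acx565akm_read(ddata, MIPID_CMD_READ_DISP_STATUS, (u8 *)&raw, 4);
	return __be32_to_cpu(raw);
}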
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index 2721a86ac5e7..b5d8a00df811 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -169,16 +169,25 @@ enum jbt_register {
static int td028ttec1_panel_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dpi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -191,6 +200,9 @@ static void td028ttec1_panel_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.dpi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
@@ -362,23 +374,6 @@ static struct omap_dss_driver td028ttec1_ops = {
.check_timings = td028ttec1_panel_check_timings,
};
-static int td028ttec1_probe_of(struct spi_device *spi)
-{
- struct device_node *node = spi->dev.of_node;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *in;
-
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&spi->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
- return 0;
-}
-
static int td028ttec1_panel_probe(struct spi_device *spi)
{
struct panel_drv_data *ddata;
@@ -404,13 +399,6 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
ddata->spi_dev = spi;
- if (!spi->dev.of_node)
- return -ENODEV;
-
- r = td028ttec1_probe_of(spi);
- if (r)
- return r;
-
ddata->vm = td028ttec1_panel_vm;
dssdev = &ddata->dssdev;
@@ -423,21 +411,16 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
r = omapdss_register_display(dssdev);
if (r) {
dev_err(&spi->dev, "Failed to register panel\n");
- goto err_reg;
+ return r;
}
return 0;
-
-err_reg:
- omap_dss_put_device(ddata->in);
- return r;
}
static int td028ttec1_panel_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
dev_dbg(&ddata->spi_dev->dev, "%s\n", __func__);
@@ -446,8 +429,6 @@ static int td028ttec1_panel_remove(struct spi_device *spi)
td028ttec1_panel_disable(dssdev);
td028ttec1_panel_disconnect(dssdev);
- omap_dss_put_device(in);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index ac4a6d4d134c..c08e22b43447 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -340,16 +340,25 @@ static void tpo_td043_power_off(struct panel_drv_data *ddata)
static int tpo_td043_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *in;
int r;
if (omapdss_device_is_connected(dssdev))
return 0;
+ in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
+ if (IS_ERR(in)) {
+ dev_err(dssdev->dev, "failed to find video source\n");
+ return PTR_ERR(in);
+ }
+
r = in->ops.dpi->connect(in, dssdev);
- if (r)
+ if (r) {
+ omap_dss_put_device(in);
return r;
+ }
+ ddata->in = in;
return 0;
}
@@ -362,6 +371,9 @@ static void tpo_td043_disconnect(struct omap_dss_device *dssdev)
return;
in->ops.dpi->disconnect(in, dssdev);
+
+ omap_dss_put_device(in);
+ ddata->in = NULL;
}
static int tpo_td043_enable(struct omap_dss_device *dssdev)
@@ -463,7 +475,6 @@ static int tpo_td043_probe_of(struct spi_device *spi)
{
struct device_node *node = spi->dev.of_node;
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct omap_dss_device *in;
int gpio;
gpio = of_get_named_gpio(node, "reset-gpios", 0);
@@ -473,14 +484,6 @@ static int tpo_td043_probe_of(struct spi_device *spi)
}
ddata->nreset_gpio = gpio;
- in = omapdss_of_find_source_for_first_ep(node);
- if (IS_ERR(in)) {
- dev_err(&spi->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- ddata->in = in;
-
return 0;
}
@@ -509,9 +512,6 @@ static int tpo_td043_probe(struct spi_device *spi)
ddata->spi = spi;
- if (!spi->dev.of_node)
- return -ENODEV;
-
r = tpo_td043_probe_of(spi);
if (r)
return r;
@@ -564,7 +564,6 @@ err_reg:
err_sysfs:
err_gpio_req:
err_regulator:
- omap_dss_put_device(ddata->in);
return r;
}
@@ -572,7 +571,6 @@ static int tpo_td043_remove(struct spi_device *spi)
{
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
@@ -581,8 +579,6 @@ static int tpo_td043_remove(struct spi_device *spi)
tpo_td043_disable(dssdev);
tpo_td043_disconnect(dssdev);
- omap_dss_put_device(in);
-
sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
return 0;
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index 67cc87a4f1f6..99e8cb8dc65b 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -18,10 +18,11 @@
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/list.h>
+
+#include "dss.h"
#include "omapdss.h"
-static bool dss_initialized;
-static const struct dispc_ops *ops;
+static struct dss_device *dss_device;
static struct list_head omapdss_comp_list;
@@ -31,27 +32,27 @@ struct omapdss_comp_node {
bool dss_core_component;
};
-void omapdss_set_is_initialized(bool set)
+struct dss_device *omapdss_get_dss(void)
{
- dss_initialized = set;
+ return dss_device;
}
-EXPORT_SYMBOL(omapdss_set_is_initialized);
+EXPORT_SYMBOL(omapdss_get_dss);
-bool omapdss_is_initialized(void)
+void omapdss_set_dss(struct dss_device *dss)
{
- return dss_initialized;
+ dss_device = dss;
}
-EXPORT_SYMBOL(omapdss_is_initialized);
+EXPORT_SYMBOL(omapdss_set_dss);
-void dispc_set_ops(const struct dispc_ops *o)
+struct dispc_device *dispc_get_dispc(struct dss_device *dss)
{
- ops = o;
+ return dss->dispc;
}
-EXPORT_SYMBOL(dispc_set_ops);
+EXPORT_SYMBOL(dispc_get_dispc);
-const struct dispc_ops *dispc_get_ops(void)
+const struct dispc_ops *dispc_get_ops(struct dss_device *dss)
{
- return ops;
+ return dss->dispc_ops;
}
EXPORT_SYMBOL(dispc_get_ops);
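
The dss/base.c hunk above replaces module-global state (the dss_initialized flag and the dispc_ops pointer) with accessors on a struct dss_device owned by the DSS core. A small sketch of how a caller might reach the dispc handle through the new accessors; the example function is hypothetical, only omapdss_get_dss() and dispc_get_dispc() come from the patch:

/* Hypothetical caller; only the accessors used below come from the patch. */
static struct dispc_device *example_get_dispc(void)
{
	struct dss_device *dss = omapdss_get_dss();

	/* NULL until omapdss_set_dss() has been called by the DSS core. */
	if (!dss)
		return NULL;

	return dispc_get_dispc(dss);
}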
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 4e8f68efd169..5e2e65e88847 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -47,6 +47,8 @@
#include "dss.h"
#include "dispc.h"
+struct dispc_device;
+
/* DISPC */
#define DISPC_SZ_REGS SZ_4K
@@ -56,11 +58,12 @@ enum omap_burst_size {
BURST_SIZE_X8 = 2,
};
-#define REG_GET(idx, start, end) \
- FLD_GET(dispc_read_reg(idx), start, end)
+#define REG_GET(dispc, idx, start, end) \
+ FLD_GET(dispc_read_reg(dispc, idx), start, end)
-#define REG_FLD_MOD(idx, val, start, end) \
- dispc_write_reg(idx, FLD_MOD(dispc_read_reg(idx), val, start, end))
+#define REG_FLD_MOD(dispc, idx, val, start, end) \
+ dispc_write_reg(dispc, idx, \
+ FLD_MOD(dispc_read_reg(dispc, idx), val, start, end))
/* DISPC has feature id */
enum dispc_feature_id {
@@ -105,7 +108,8 @@ struct dispc_features {
unsigned int max_downscale;
unsigned int max_line_width;
unsigned int min_pcd;
- int (*calc_scaling) (unsigned long pclk, unsigned long lclk,
+ int (*calc_scaling)(struct dispc_device *dispc,
+ unsigned long pclk, unsigned long lclk,
const struct videomode *vm,
u16 width, u16 height, u16 out_width, u16 out_height,
u32 fourcc, bool *five_taps,
@@ -162,9 +166,12 @@ struct dispc_features {
#define DISPC_MAX_NR_FIFOS 5
#define DISPC_MAX_CHANNEL_GAMMA 4
-static struct {
+struct dispc_device {
struct platform_device *pdev;
void __iomem *base;
+ struct dss_device *dss;
+
+ struct dss_debugfs_entry *debugfs;
int irq;
irq_handler_t user_handler;
@@ -191,7 +198,7 @@ static struct {
/* DISPC_CONTROL & DISPC_CONFIG lock*/
spinlock_t control_lock;
-} dispc;
+};
enum omap_color_component {
/* used for all color formats for OMAP3 and earlier
@@ -345,313 +352,315 @@ static const struct {
},
};
-struct color_conv_coef {
- int ry, rcr, rcb, gy, gcr, gcb, by, bcr, bcb;
- int full_range;
-};
-
-static unsigned long dispc_fclk_rate(void);
-static unsigned long dispc_core_clk_rate(void);
-static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel);
-static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel);
+static unsigned long dispc_fclk_rate(struct dispc_device *dispc);
+static unsigned long dispc_core_clk_rate(struct dispc_device *dispc);
+static unsigned long dispc_mgr_lclk_rate(struct dispc_device *dispc,
+ enum omap_channel channel);
+static unsigned long dispc_mgr_pclk_rate(struct dispc_device *dispc,
+ enum omap_channel channel);
-static unsigned long dispc_plane_pclk_rate(enum omap_plane_id plane);
-static unsigned long dispc_plane_lclk_rate(enum omap_plane_id plane);
+static unsigned long dispc_plane_pclk_rate(struct dispc_device *dispc,
+ enum omap_plane_id plane);
+static unsigned long dispc_plane_lclk_rate(struct dispc_device *dispc,
+ enum omap_plane_id plane);
-static void dispc_clear_irqstatus(u32 mask);
-static bool dispc_mgr_is_enabled(enum omap_channel channel);
-static void dispc_clear_irqstatus(u32 mask);
+static void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask);
-static inline void dispc_write_reg(const u16 idx, u32 val)
+static inline void dispc_write_reg(struct dispc_device *dispc, u16 idx, u32 val)
{
- __raw_writel(val, dispc.base + idx);
+ __raw_writel(val, dispc->base + idx);
}
-static inline u32 dispc_read_reg(const u16 idx)
+static inline u32 dispc_read_reg(struct dispc_device *dispc, u16 idx)
{
- return __raw_readl(dispc.base + idx);
+ return __raw_readl(dispc->base + idx);
}
-static u32 mgr_fld_read(enum omap_channel channel, enum mgr_reg_fields regfld)
+static u32 mgr_fld_read(struct dispc_device *dispc, enum omap_channel channel,
+ enum mgr_reg_fields regfld)
{
const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
- return REG_GET(rfld.reg, rfld.high, rfld.low);
+
+ return REG_GET(dispc, rfld.reg, rfld.high, rfld.low);
}
-static void mgr_fld_write(enum omap_channel channel,
- enum mgr_reg_fields regfld, int val) {
+static void mgr_fld_write(struct dispc_device *dispc, enum omap_channel channel,
+ enum mgr_reg_fields regfld, int val)
+{
const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld];
const bool need_lock = rfld.reg == DISPC_CONTROL || rfld.reg == DISPC_CONFIG;
unsigned long flags;
- if (need_lock)
- spin_lock_irqsave(&dispc.control_lock, flags);
-
- REG_FLD_MOD(rfld.reg, val, rfld.high, rfld.low);
-
- if (need_lock)
- spin_unlock_irqrestore(&dispc.control_lock, flags);
+ if (need_lock) {
+ spin_lock_irqsave(&dispc->control_lock, flags);
+ REG_FLD_MOD(dispc, rfld.reg, val, rfld.high, rfld.low);
+ spin_unlock_irqrestore(&dispc->control_lock, flags);
+ } else {
+ REG_FLD_MOD(dispc, rfld.reg, val, rfld.high, rfld.low);
+ }
}
-static int dispc_get_num_ovls(void)
+static int dispc_get_num_ovls(struct dispc_device *dispc)
{
- return dispc.feat->num_ovls;
+ return dispc->feat->num_ovls;
}
-static int dispc_get_num_mgrs(void)
+static int dispc_get_num_mgrs(struct dispc_device *dispc)
{
- return dispc.feat->num_mgrs;
+ return dispc->feat->num_mgrs;
}
-static void dispc_get_reg_field(enum dispc_feat_reg_field id,
+static void dispc_get_reg_field(struct dispc_device *dispc,
+ enum dispc_feat_reg_field id,
u8 *start, u8 *end)
{
- if (id >= dispc.feat->num_reg_fields)
+ if (id >= dispc->feat->num_reg_fields)
BUG();
- *start = dispc.feat->reg_fields[id].start;
- *end = dispc.feat->reg_fields[id].end;
+ *start = dispc->feat->reg_fields[id].start;
+ *end = dispc->feat->reg_fields[id].end;
}
-static bool dispc_has_feature(enum dispc_feature_id id)
+static bool dispc_has_feature(struct dispc_device *dispc,
+ enum dispc_feature_id id)
{
unsigned int i;
- for (i = 0; i < dispc.feat->num_features; i++) {
- if (dispc.feat->features[i] == id)
+ for (i = 0; i < dispc->feat->num_features; i++) {
+ if (dispc->feat->features[i] == id)
return true;
}
return false;
}
-#define SR(reg) \
- dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
-#define RR(reg) \
- dispc_write_reg(DISPC_##reg, dispc.ctx[DISPC_##reg / sizeof(u32)])
+#define SR(dispc, reg) \
+ dispc->ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(dispc, DISPC_##reg)
+#define RR(dispc, reg) \
+ dispc_write_reg(dispc, DISPC_##reg, dispc->ctx[DISPC_##reg / sizeof(u32)])
-static void dispc_save_context(void)
+static void dispc_save_context(struct dispc_device *dispc)
{
int i, j;
DSSDBG("dispc_save_context\n");
- SR(IRQENABLE);
- SR(CONTROL);
- SR(CONFIG);
- SR(LINE_NUMBER);
- if (dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
- dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
- SR(GLOBAL_ALPHA);
- if (dispc_has_feature(FEAT_MGR_LCD2)) {
- SR(CONTROL2);
- SR(CONFIG2);
+ SR(dispc, IRQENABLE);
+ SR(dispc, CONTROL);
+ SR(dispc, CONFIG);
+ SR(dispc, LINE_NUMBER);
+ if (dispc_has_feature(dispc, FEAT_ALPHA_FIXED_ZORDER) ||
+ dispc_has_feature(dispc, FEAT_ALPHA_FREE_ZORDER))
+ SR(dispc, GLOBAL_ALPHA);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD2)) {
+ SR(dispc, CONTROL2);
+ SR(dispc, CONFIG2);
}
- if (dispc_has_feature(FEAT_MGR_LCD3)) {
- SR(CONTROL3);
- SR(CONFIG3);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD3)) {
+ SR(dispc, CONTROL3);
+ SR(dispc, CONFIG3);
}
- for (i = 0; i < dispc_get_num_mgrs(); i++) {
- SR(DEFAULT_COLOR(i));
- SR(TRANS_COLOR(i));
- SR(SIZE_MGR(i));
+ for (i = 0; i < dispc_get_num_mgrs(dispc); i++) {
+ SR(dispc, DEFAULT_COLOR(i));
+ SR(dispc, TRANS_COLOR(i));
+ SR(dispc, SIZE_MGR(i));
if (i == OMAP_DSS_CHANNEL_DIGIT)
continue;
- SR(TIMING_H(i));
- SR(TIMING_V(i));
- SR(POL_FREQ(i));
- SR(DIVISORo(i));
-
- SR(DATA_CYCLE1(i));
- SR(DATA_CYCLE2(i));
- SR(DATA_CYCLE3(i));
-
- if (dispc_has_feature(FEAT_CPR)) {
- SR(CPR_COEF_R(i));
- SR(CPR_COEF_G(i));
- SR(CPR_COEF_B(i));
+ SR(dispc, TIMING_H(i));
+ SR(dispc, TIMING_V(i));
+ SR(dispc, POL_FREQ(i));
+ SR(dispc, DIVISORo(i));
+
+ SR(dispc, DATA_CYCLE1(i));
+ SR(dispc, DATA_CYCLE2(i));
+ SR(dispc, DATA_CYCLE3(i));
+
+ if (dispc_has_feature(dispc, FEAT_CPR)) {
+ SR(dispc, CPR_COEF_R(i));
+ SR(dispc, CPR_COEF_G(i));
+ SR(dispc, CPR_COEF_B(i));
}
}
- for (i = 0; i < dispc_get_num_ovls(); i++) {
- SR(OVL_BA0(i));
- SR(OVL_BA1(i));
- SR(OVL_POSITION(i));
- SR(OVL_SIZE(i));
- SR(OVL_ATTRIBUTES(i));
- SR(OVL_FIFO_THRESHOLD(i));
- SR(OVL_ROW_INC(i));
- SR(OVL_PIXEL_INC(i));
- if (dispc_has_feature(FEAT_PRELOAD))
- SR(OVL_PRELOAD(i));
+ for (i = 0; i < dispc_get_num_ovls(dispc); i++) {
+ SR(dispc, OVL_BA0(i));
+ SR(dispc, OVL_BA1(i));
+ SR(dispc, OVL_POSITION(i));
+ SR(dispc, OVL_SIZE(i));
+ SR(dispc, OVL_ATTRIBUTES(i));
+ SR(dispc, OVL_FIFO_THRESHOLD(i));
+ SR(dispc, OVL_ROW_INC(i));
+ SR(dispc, OVL_PIXEL_INC(i));
+ if (dispc_has_feature(dispc, FEAT_PRELOAD))
+ SR(dispc, OVL_PRELOAD(i));
if (i == OMAP_DSS_GFX) {
- SR(OVL_WINDOW_SKIP(i));
- SR(OVL_TABLE_BA(i));
+ SR(dispc, OVL_WINDOW_SKIP(i));
+ SR(dispc, OVL_TABLE_BA(i));
continue;
}
- SR(OVL_FIR(i));
- SR(OVL_PICTURE_SIZE(i));
- SR(OVL_ACCU0(i));
- SR(OVL_ACCU1(i));
+ SR(dispc, OVL_FIR(i));
+ SR(dispc, OVL_PICTURE_SIZE(i));
+ SR(dispc, OVL_ACCU0(i));
+ SR(dispc, OVL_ACCU1(i));
for (j = 0; j < 8; j++)
- SR(OVL_FIR_COEF_H(i, j));
+ SR(dispc, OVL_FIR_COEF_H(i, j));
for (j = 0; j < 8; j++)
- SR(OVL_FIR_COEF_HV(i, j));
+ SR(dispc, OVL_FIR_COEF_HV(i, j));
for (j = 0; j < 5; j++)
- SR(OVL_CONV_COEF(i, j));
+ SR(dispc, OVL_CONV_COEF(i, j));
- if (dispc_has_feature(FEAT_FIR_COEF_V)) {
+ if (dispc_has_feature(dispc, FEAT_FIR_COEF_V)) {
for (j = 0; j < 8; j++)
- SR(OVL_FIR_COEF_V(i, j));
+ SR(dispc, OVL_FIR_COEF_V(i, j));
}
- if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- SR(OVL_BA0_UV(i));
- SR(OVL_BA1_UV(i));
- SR(OVL_FIR2(i));
- SR(OVL_ACCU2_0(i));
- SR(OVL_ACCU2_1(i));
+ if (dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE)) {
+ SR(dispc, OVL_BA0_UV(i));
+ SR(dispc, OVL_BA1_UV(i));
+ SR(dispc, OVL_FIR2(i));
+ SR(dispc, OVL_ACCU2_0(i));
+ SR(dispc, OVL_ACCU2_1(i));
for (j = 0; j < 8; j++)
- SR(OVL_FIR_COEF_H2(i, j));
+ SR(dispc, OVL_FIR_COEF_H2(i, j));
for (j = 0; j < 8; j++)
- SR(OVL_FIR_COEF_HV2(i, j));
+ SR(dispc, OVL_FIR_COEF_HV2(i, j));
for (j = 0; j < 8; j++)
- SR(OVL_FIR_COEF_V2(i, j));
+ SR(dispc, OVL_FIR_COEF_V2(i, j));
}
- if (dispc_has_feature(FEAT_ATTR2))
- SR(OVL_ATTRIBUTES2(i));
+ if (dispc_has_feature(dispc, FEAT_ATTR2))
+ SR(dispc, OVL_ATTRIBUTES2(i));
}
- if (dispc_has_feature(FEAT_CORE_CLK_DIV))
- SR(DIVISOR);
+ if (dispc_has_feature(dispc, FEAT_CORE_CLK_DIV))
+ SR(dispc, DIVISOR);
- dispc.ctx_valid = true;
+ dispc->ctx_valid = true;
DSSDBG("context saved\n");
}
-static void dispc_restore_context(void)
+static void dispc_restore_context(struct dispc_device *dispc)
{
int i, j;
DSSDBG("dispc_restore_context\n");
- if (!dispc.ctx_valid)
+ if (!dispc->ctx_valid)
return;
- /*RR(IRQENABLE);*/
- /*RR(CONTROL);*/
- RR(CONFIG);
- RR(LINE_NUMBER);
- if (dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
- dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
- RR(GLOBAL_ALPHA);
- if (dispc_has_feature(FEAT_MGR_LCD2))
- RR(CONFIG2);
- if (dispc_has_feature(FEAT_MGR_LCD3))
- RR(CONFIG3);
-
- for (i = 0; i < dispc_get_num_mgrs(); i++) {
- RR(DEFAULT_COLOR(i));
- RR(TRANS_COLOR(i));
- RR(SIZE_MGR(i));
+ /*RR(dispc, IRQENABLE);*/
+ /*RR(dispc, CONTROL);*/
+ RR(dispc, CONFIG);
+ RR(dispc, LINE_NUMBER);
+ if (dispc_has_feature(dispc, FEAT_ALPHA_FIXED_ZORDER) ||
+ dispc_has_feature(dispc, FEAT_ALPHA_FREE_ZORDER))
+ RR(dispc, GLOBAL_ALPHA);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD2))
+ RR(dispc, CONFIG2);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD3))
+ RR(dispc, CONFIG3);
+
+ for (i = 0; i < dispc_get_num_mgrs(dispc); i++) {
+ RR(dispc, DEFAULT_COLOR(i));
+ RR(dispc, TRANS_COLOR(i));
+ RR(dispc, SIZE_MGR(i));
if (i == OMAP_DSS_CHANNEL_DIGIT)
continue;
- RR(TIMING_H(i));
- RR(TIMING_V(i));
- RR(POL_FREQ(i));
- RR(DIVISORo(i));
-
- RR(DATA_CYCLE1(i));
- RR(DATA_CYCLE2(i));
- RR(DATA_CYCLE3(i));
-
- if (dispc_has_feature(FEAT_CPR)) {
- RR(CPR_COEF_R(i));
- RR(CPR_COEF_G(i));
- RR(CPR_COEF_B(i));
+ RR(dispc, TIMING_H(i));
+ RR(dispc, TIMING_V(i));
+ RR(dispc, POL_FREQ(i));
+ RR(dispc, DIVISORo(i));
+
+ RR(dispc, DATA_CYCLE1(i));
+ RR(dispc, DATA_CYCLE2(i));
+ RR(dispc, DATA_CYCLE3(i));
+
+ if (dispc_has_feature(dispc, FEAT_CPR)) {
+ RR(dispc, CPR_COEF_R(i));
+ RR(dispc, CPR_COEF_G(i));
+ RR(dispc, CPR_COEF_B(i));
}
}
- for (i = 0; i < dispc_get_num_ovls(); i++) {
- RR(OVL_BA0(i));
- RR(OVL_BA1(i));
- RR(OVL_POSITION(i));
- RR(OVL_SIZE(i));
- RR(OVL_ATTRIBUTES(i));
- RR(OVL_FIFO_THRESHOLD(i));
- RR(OVL_ROW_INC(i));
- RR(OVL_PIXEL_INC(i));
- if (dispc_has_feature(FEAT_PRELOAD))
- RR(OVL_PRELOAD(i));
+ for (i = 0; i < dispc_get_num_ovls(dispc); i++) {
+ RR(dispc, OVL_BA0(i));
+ RR(dispc, OVL_BA1(i));
+ RR(dispc, OVL_POSITION(i));
+ RR(dispc, OVL_SIZE(i));
+ RR(dispc, OVL_ATTRIBUTES(i));
+ RR(dispc, OVL_FIFO_THRESHOLD(i));
+ RR(dispc, OVL_ROW_INC(i));
+ RR(dispc, OVL_PIXEL_INC(i));
+ if (dispc_has_feature(dispc, FEAT_PRELOAD))
+ RR(dispc, OVL_PRELOAD(i));
if (i == OMAP_DSS_GFX) {
- RR(OVL_WINDOW_SKIP(i));
- RR(OVL_TABLE_BA(i));
+ RR(dispc, OVL_WINDOW_SKIP(i));
+ RR(dispc, OVL_TABLE_BA(i));
continue;
}
- RR(OVL_FIR(i));
- RR(OVL_PICTURE_SIZE(i));
- RR(OVL_ACCU0(i));
- RR(OVL_ACCU1(i));
+ RR(dispc, OVL_FIR(i));
+ RR(dispc, OVL_PICTURE_SIZE(i));
+ RR(dispc, OVL_ACCU0(i));
+ RR(dispc, OVL_ACCU1(i));
for (j = 0; j < 8; j++)
- RR(OVL_FIR_COEF_H(i, j));
+ RR(dispc, OVL_FIR_COEF_H(i, j));
for (j = 0; j < 8; j++)
- RR(OVL_FIR_COEF_HV(i, j));
+ RR(dispc, OVL_FIR_COEF_HV(i, j));
for (j = 0; j < 5; j++)
- RR(OVL_CONV_COEF(i, j));
+ RR(dispc, OVL_CONV_COEF(i, j));
- if (dispc_has_feature(FEAT_FIR_COEF_V)) {
+ if (dispc_has_feature(dispc, FEAT_FIR_COEF_V)) {
for (j = 0; j < 8; j++)
- RR(OVL_FIR_COEF_V(i, j));
+ RR(dispc, OVL_FIR_COEF_V(i, j));
}
- if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- RR(OVL_BA0_UV(i));
- RR(OVL_BA1_UV(i));
- RR(OVL_FIR2(i));
- RR(OVL_ACCU2_0(i));
- RR(OVL_ACCU2_1(i));
+ if (dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE)) {
+ RR(dispc, OVL_BA0_UV(i));
+ RR(dispc, OVL_BA1_UV(i));
+ RR(dispc, OVL_FIR2(i));
+ RR(dispc, OVL_ACCU2_0(i));
+ RR(dispc, OVL_ACCU2_1(i));
for (j = 0; j < 8; j++)
- RR(OVL_FIR_COEF_H2(i, j));
+ RR(dispc, OVL_FIR_COEF_H2(i, j));
for (j = 0; j < 8; j++)
- RR(OVL_FIR_COEF_HV2(i, j));
+ RR(dispc, OVL_FIR_COEF_HV2(i, j));
for (j = 0; j < 8; j++)
- RR(OVL_FIR_COEF_V2(i, j));
+ RR(dispc, OVL_FIR_COEF_V2(i, j));
}
- if (dispc_has_feature(FEAT_ATTR2))
- RR(OVL_ATTRIBUTES2(i));
+ if (dispc_has_feature(dispc, FEAT_ATTR2))
+ RR(dispc, OVL_ATTRIBUTES2(i));
}
- if (dispc_has_feature(FEAT_CORE_CLK_DIV))
- RR(DIVISOR);
+ if (dispc_has_feature(dispc, FEAT_CORE_CLK_DIV))
+ RR(dispc, DIVISOR);
/* enable last, because LCD & DIGIT enable are here */
- RR(CONTROL);
- if (dispc_has_feature(FEAT_MGR_LCD2))
- RR(CONTROL2);
- if (dispc_has_feature(FEAT_MGR_LCD3))
- RR(CONTROL3);
+ RR(dispc, CONTROL);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD2))
+ RR(dispc, CONTROL2);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD3))
+ RR(dispc, CONTROL3);
/* clear spurious SYNC_LOST_DIGIT interrupts */
- dispc_clear_irqstatus(DISPC_IRQ_SYNC_LOST_DIGIT);
+ dispc_clear_irqstatus(dispc, DISPC_IRQ_SYNC_LOST_DIGIT);
/*
* enable last so IRQs won't trigger before
* the context is fully restored
*/
- RR(IRQENABLE);
+ RR(dispc, IRQENABLE);
DSSDBG("context restored\n");
}
@@ -659,146 +668,159 @@ static void dispc_restore_context(void)
#undef SR
#undef RR
-int dispc_runtime_get(void)
+int dispc_runtime_get(struct dispc_device *dispc)
{
int r;
DSSDBG("dispc_runtime_get\n");
- r = pm_runtime_get_sync(&dispc.pdev->dev);
+ r = pm_runtime_get_sync(&dispc->pdev->dev);
WARN_ON(r < 0);
return r < 0 ? r : 0;
}
-void dispc_runtime_put(void)
+void dispc_runtime_put(struct dispc_device *dispc)
{
int r;
DSSDBG("dispc_runtime_put\n");
- r = pm_runtime_put_sync(&dispc.pdev->dev);
+ r = pm_runtime_put_sync(&dispc->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
-static u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
+static u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc,
+ enum omap_channel channel)
{
return mgr_desc[channel].vsync_irq;
}
-static u32 dispc_mgr_get_framedone_irq(enum omap_channel channel)
+static u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc,
+ enum omap_channel channel)
{
- if (channel == OMAP_DSS_CHANNEL_DIGIT && dispc.feat->no_framedone_tv)
+ if (channel == OMAP_DSS_CHANNEL_DIGIT && dispc->feat->no_framedone_tv)
return 0;
return mgr_desc[channel].framedone_irq;
}
-static u32 dispc_mgr_get_sync_lost_irq(enum omap_channel channel)
+static u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc,
+ enum omap_channel channel)
{
return mgr_desc[channel].sync_lost_irq;
}
-u32 dispc_wb_get_framedone_irq(void)
+static u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc)
{
return DISPC_IRQ_FRAMEDONEWB;
}
-static void dispc_mgr_enable(enum omap_channel channel, bool enable)
+static void dispc_mgr_enable(struct dispc_device *dispc,
+ enum omap_channel channel, bool enable)
{
- mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable);
+ mgr_fld_write(dispc, channel, DISPC_MGR_FLD_ENABLE, enable);
/* flush posted write */
- mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
+ mgr_fld_read(dispc, channel, DISPC_MGR_FLD_ENABLE);
}
-static bool dispc_mgr_is_enabled(enum omap_channel channel)
+static bool dispc_mgr_is_enabled(struct dispc_device *dispc,
+ enum omap_channel channel)
{
- return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE);
+ return !!mgr_fld_read(dispc, channel, DISPC_MGR_FLD_ENABLE);
}
-static bool dispc_mgr_go_busy(enum omap_channel channel)
+static bool dispc_mgr_go_busy(struct dispc_device *dispc,
+ enum omap_channel channel)
{
- return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1;
+ return mgr_fld_read(dispc, channel, DISPC_MGR_FLD_GO) == 1;
}
-static void dispc_mgr_go(enum omap_channel channel)
+static void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel)
{
- WARN_ON(!dispc_mgr_is_enabled(channel));
- WARN_ON(dispc_mgr_go_busy(channel));
+ WARN_ON(!dispc_mgr_is_enabled(dispc, channel));
+ WARN_ON(dispc_mgr_go_busy(dispc, channel));
DSSDBG("GO %s\n", mgr_desc[channel].name);
- mgr_fld_write(channel, DISPC_MGR_FLD_GO, 1);
+ mgr_fld_write(dispc, channel, DISPC_MGR_FLD_GO, 1);
}
-bool dispc_wb_go_busy(void)
+static bool dispc_wb_go_busy(struct dispc_device *dispc)
{
- return REG_GET(DISPC_CONTROL2, 6, 6) == 1;
+ return REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1;
}
-void dispc_wb_go(void)
+static void dispc_wb_go(struct dispc_device *dispc)
{
enum omap_plane_id plane = OMAP_DSS_WB;
bool enable, go;
- enable = REG_GET(DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1;
+ enable = REG_GET(dispc, DISPC_OVL_ATTRIBUTES(plane), 0, 0) == 1;
if (!enable)
return;
- go = REG_GET(DISPC_CONTROL2, 6, 6) == 1;
+ go = REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1;
if (go) {
DSSERR("GO bit not down for WB\n");
return;
}
- REG_FLD_MOD(DISPC_CONTROL2, 1, 6, 6);
+ REG_FLD_MOD(dispc, DISPC_CONTROL2, 1, 6, 6);
}
-static void dispc_ovl_write_firh_reg(enum omap_plane_id plane, int reg,
+static void dispc_ovl_write_firh_reg(struct dispc_device *dispc,
+ enum omap_plane_id plane, int reg,
u32 value)
{
- dispc_write_reg(DISPC_OVL_FIR_COEF_H(plane, reg), value);
+ dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_H(plane, reg), value);
}
-static void dispc_ovl_write_firhv_reg(enum omap_plane_id plane, int reg,
+static void dispc_ovl_write_firhv_reg(struct dispc_device *dispc,
+ enum omap_plane_id plane, int reg,
u32 value)
{
- dispc_write_reg(DISPC_OVL_FIR_COEF_HV(plane, reg), value);
+ dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_HV(plane, reg), value);
}
-static void dispc_ovl_write_firv_reg(enum omap_plane_id plane, int reg,
+static void dispc_ovl_write_firv_reg(struct dispc_device *dispc,
+ enum omap_plane_id plane, int reg,
u32 value)
{
- dispc_write_reg(DISPC_OVL_FIR_COEF_V(plane, reg), value);
+ dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_V(plane, reg), value);
}
-static void dispc_ovl_write_firh2_reg(enum omap_plane_id plane, int reg,
+static void dispc_ovl_write_firh2_reg(struct dispc_device *dispc,
+ enum omap_plane_id plane, int reg,
u32 value)
{
BUG_ON(plane == OMAP_DSS_GFX);
- dispc_write_reg(DISPC_OVL_FIR_COEF_H2(plane, reg), value);
+ dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_H2(plane, reg), value);
}
-static void dispc_ovl_write_firhv2_reg(enum omap_plane_id plane, int reg,
- u32 value)
+static void dispc_ovl_write_firhv2_reg(struct dispc_device *dispc,
+ enum omap_plane_id plane, int reg,
+ u32 value)
{
BUG_ON(plane == OMAP_DSS_GFX);
- dispc_write_reg(DISPC_OVL_FIR_COEF_HV2(plane, reg), value);
+ dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_HV2(plane, reg), value);
}
-static void dispc_ovl_write_firv2_reg(enum omap_plane_id plane, int reg,
+static void dispc_ovl_write_firv2_reg(struct dispc_device *dispc,
+ enum omap_plane_id plane, int reg,
u32 value)
{
BUG_ON(plane == OMAP_DSS_GFX);
- dispc_write_reg(DISPC_OVL_FIR_COEF_V2(plane, reg), value);
+ dispc_write_reg(dispc, DISPC_OVL_FIR_COEF_V2(plane, reg), value);
}
-static void dispc_ovl_set_scale_coef(enum omap_plane_id plane, int fir_hinc,
- int fir_vinc, int five_taps,
- enum omap_color_component color_comp)
+static void dispc_ovl_set_scale_coef(struct dispc_device *dispc,
+ enum omap_plane_id plane, int fir_hinc,
+ int fir_vinc, int five_taps,
+ enum omap_color_component color_comp)
{
const struct dispc_coef *h_coef, *v_coef;
int i;
@@ -819,11 +841,11 @@ static void dispc_ovl_set_scale_coef(enum omap_plane_id plane, int fir_hinc,
| FLD_VAL(v_coef[i].hc3_vc2, 31, 24);
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
- dispc_ovl_write_firh_reg(plane, i, h);
- dispc_ovl_write_firhv_reg(plane, i, hv);
+ dispc_ovl_write_firh_reg(dispc, plane, i, h);
+ dispc_ovl_write_firhv_reg(dispc, plane, i, hv);
} else {
- dispc_ovl_write_firh2_reg(plane, i, h);
- dispc_ovl_write_firhv2_reg(plane, i, hv);
+ dispc_ovl_write_firh2_reg(dispc, plane, i, h);
+ dispc_ovl_write_firhv2_reg(dispc, plane, i, hv);
}
}
@@ -834,72 +856,113 @@ static void dispc_ovl_set_scale_coef(enum omap_plane_id plane, int fir_hinc,
v = FLD_VAL(v_coef[i].hc0_vc00, 7, 0)
| FLD_VAL(v_coef[i].hc4_vc22, 15, 8);
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y)
- dispc_ovl_write_firv_reg(plane, i, v);
+ dispc_ovl_write_firv_reg(dispc, plane, i, v);
else
- dispc_ovl_write_firv2_reg(plane, i, v);
+ dispc_ovl_write_firv2_reg(dispc, plane, i, v);
}
}
}
+struct csc_coef_yuv2rgb {
+ int ry, rcb, rcr, gy, gcb, gcr, by, bcb, bcr;
+ bool full_range;
+};
+
+struct csc_coef_rgb2yuv {
+ int yr, yg, yb, cbr, cbg, cbb, crr, crg, crb;
+ bool full_range;
+};
+
+static void dispc_ovl_write_color_conv_coef(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ const struct csc_coef_yuv2rgb *ct)
+{
+#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0))
+
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->rcr, ct->ry));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->gy, ct->rcb));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->gcb, ct->gcr));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->bcr, ct->by));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->bcb));
-static void dispc_ovl_write_color_conv_coef(enum omap_plane_id plane,
- const struct color_conv_coef *ct)
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11);
+
+#undef CVAL
+}
+
+static void dispc_wb_write_color_conv_coef(struct dispc_device *dispc,
+ const struct csc_coef_rgb2yuv *ct)
{
+ const enum omap_plane_id plane = OMAP_DSS_WB;
+
#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0))
- dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->rcr, ct->ry));
- dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->gy, ct->rcb));
- dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->gcb, ct->gcr));
- dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->bcr, ct->by));
- dispc_write_reg(DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->bcb));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->yg, ct->yr));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->crr, ct->yb));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->crb, ct->crg));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->cbg, ct->cbr));
+ dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->cbb));
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11);
#undef CVAL
}
-static void dispc_setup_color_conv_coef(void)
+static void dispc_setup_color_conv_coef(struct dispc_device *dispc)
{
int i;
- int num_ovl = dispc_get_num_ovls();
- const struct color_conv_coef ctbl_bt601_5_ovl = {
- /* YUV -> RGB */
- 298, 409, 0, 298, -208, -100, 298, 0, 517, 0,
+ int num_ovl = dispc_get_num_ovls(dispc);
+
+ /* YUV -> RGB, ITU-R BT.601, limited range */
+ const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = {
+ 298, 0, 409, /* ry, rcb, rcr */
+ 298, -100, -208, /* gy, gcb, gcr */
+ 298, 516, 0, /* by, bcb, bcr */
+ false, /* limited range */
};
- const struct color_conv_coef ctbl_bt601_5_wb = {
- /* RGB -> YUV */
- 66, 129, 25, 112, -94, -18, -38, -74, 112, 0,
+
+ /* RGB -> YUV, ITU-R BT.601, limited range */
+ const struct csc_coef_rgb2yuv coefs_rgb2yuv_bt601_lim = {
+ 66, 129, 25, /* yr, yg, yb */
+ -38, -74, 112, /* cbr, cbg, cbb */
+ 112, -94, -18, /* crr, crg, crb */
+ false, /* limited range */
};
for (i = 1; i < num_ovl; i++)
- dispc_ovl_write_color_conv_coef(i, &ctbl_bt601_5_ovl);
+ dispc_ovl_write_color_conv_coef(dispc, i, &coefs_yuv2rgb_bt601_lim);
- if (dispc.feat->has_writeback)
- dispc_ovl_write_color_conv_coef(OMAP_DSS_WB, &ctbl_bt601_5_wb);
+ if (dispc->feat->has_writeback)
+ dispc_wb_write_color_conv_coef(dispc, &coefs_rgb2yuv_bt601_lim);
}
-static void dispc_ovl_set_ba0(enum omap_plane_id plane, u32 paddr)
+static void dispc_ovl_set_ba0(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 paddr)
{
- dispc_write_reg(DISPC_OVL_BA0(plane), paddr);
+ dispc_write_reg(dispc, DISPC_OVL_BA0(plane), paddr);
}
-static void dispc_ovl_set_ba1(enum omap_plane_id plane, u32 paddr)
+static void dispc_ovl_set_ba1(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 paddr)
{
- dispc_write_reg(DISPC_OVL_BA1(plane), paddr);
+ dispc_write_reg(dispc, DISPC_OVL_BA1(plane), paddr);
}
-static void dispc_ovl_set_ba0_uv(enum omap_plane_id plane, u32 paddr)
+static void dispc_ovl_set_ba0_uv(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 paddr)
{
- dispc_write_reg(DISPC_OVL_BA0_UV(plane), paddr);
+ dispc_write_reg(dispc, DISPC_OVL_BA0_UV(plane), paddr);
}
-static void dispc_ovl_set_ba1_uv(enum omap_plane_id plane, u32 paddr)
+static void dispc_ovl_set_ba1_uv(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 paddr)
{
- dispc_write_reg(DISPC_OVL_BA1_UV(plane), paddr);
+ dispc_write_reg(dispc, DISPC_OVL_BA1_UV(plane), paddr);
}
-static void dispc_ovl_set_pos(enum omap_plane_id plane,
- enum omap_overlay_caps caps, int x, int y)
+static void dispc_ovl_set_pos(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_overlay_caps caps, int x, int y)
{
u32 val;
@@ -908,22 +971,24 @@ static void dispc_ovl_set_pos(enum omap_plane_id plane,
val = FLD_VAL(y, 26, 16) | FLD_VAL(x, 10, 0);
- dispc_write_reg(DISPC_OVL_POSITION(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_POSITION(plane), val);
}
-static void dispc_ovl_set_input_size(enum omap_plane_id plane, int width,
- int height)
+static void dispc_ovl_set_input_size(struct dispc_device *dispc,
+ enum omap_plane_id plane, int width,
+ int height)
{
u32 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
if (plane == OMAP_DSS_GFX || plane == OMAP_DSS_WB)
- dispc_write_reg(DISPC_OVL_SIZE(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_SIZE(plane), val);
else
- dispc_write_reg(DISPC_OVL_PICTURE_SIZE(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_PICTURE_SIZE(plane), val);
}
-static void dispc_ovl_set_output_size(enum omap_plane_id plane, int width,
- int height)
+static void dispc_ovl_set_output_size(struct dispc_device *dispc,
+ enum omap_plane_id plane, int width,
+ int height)
{
u32 val;
@@ -932,64 +997,72 @@ static void dispc_ovl_set_output_size(enum omap_plane_id plane, int width,
val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
if (plane == OMAP_DSS_WB)
- dispc_write_reg(DISPC_OVL_PICTURE_SIZE(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_PICTURE_SIZE(plane), val);
else
- dispc_write_reg(DISPC_OVL_SIZE(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_SIZE(plane), val);
}
-static void dispc_ovl_set_zorder(enum omap_plane_id plane,
- enum omap_overlay_caps caps, u8 zorder)
+static void dispc_ovl_set_zorder(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_overlay_caps caps, u8 zorder)
{
if ((caps & OMAP_DSS_OVL_CAP_ZORDER) == 0)
return;
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), zorder, 27, 26);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), zorder, 27, 26);
}
-static void dispc_ovl_enable_zorder_planes(void)
+static void dispc_ovl_enable_zorder_planes(struct dispc_device *dispc)
{
int i;
- if (!dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
+ if (!dispc_has_feature(dispc, FEAT_ALPHA_FREE_ZORDER))
return;
- for (i = 0; i < dispc_get_num_ovls(); i++)
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(i), 1, 25, 25);
+ for (i = 0; i < dispc_get_num_ovls(dispc); i++)
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(i), 1, 25, 25);
}
-static void dispc_ovl_set_pre_mult_alpha(enum omap_plane_id plane,
- enum omap_overlay_caps caps, bool enable)
+static void dispc_ovl_set_pre_mult_alpha(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_overlay_caps caps,
+ bool enable)
{
if ((caps & OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA) == 0)
return;
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 28, 28);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 28, 28);
}
-static void dispc_ovl_setup_global_alpha(enum omap_plane_id plane,
- enum omap_overlay_caps caps, u8 global_alpha)
+static void dispc_ovl_setup_global_alpha(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_overlay_caps caps,
+ u8 global_alpha)
{
- static const unsigned shifts[] = { 0, 8, 16, 24, };
+ static const unsigned int shifts[] = { 0, 8, 16, 24, };
int shift;
if ((caps & OMAP_DSS_OVL_CAP_GLOBAL_ALPHA) == 0)
return;
shift = shifts[plane];
- REG_FLD_MOD(DISPC_GLOBAL_ALPHA, global_alpha, shift + 7, shift);
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_ALPHA, global_alpha, shift + 7, shift);
}
-static void dispc_ovl_set_pix_inc(enum omap_plane_id plane, s32 inc)
+static void dispc_ovl_set_pix_inc(struct dispc_device *dispc,
+ enum omap_plane_id plane, s32 inc)
{
- dispc_write_reg(DISPC_OVL_PIXEL_INC(plane), inc);
+ dispc_write_reg(dispc, DISPC_OVL_PIXEL_INC(plane), inc);
}
-static void dispc_ovl_set_row_inc(enum omap_plane_id plane, s32 inc)
+static void dispc_ovl_set_row_inc(struct dispc_device *dispc,
+ enum omap_plane_id plane, s32 inc)
{
- dispc_write_reg(DISPC_OVL_ROW_INC(plane), inc);
+ dispc_write_reg(dispc, DISPC_OVL_ROW_INC(plane), inc);
}
-static void dispc_ovl_set_color_mode(enum omap_plane_id plane, u32 fourcc)
+static void dispc_ovl_set_color_mode(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 fourcc)
{
u32 m = 0;
if (plane != OMAP_DSS_GFX) {
@@ -1058,7 +1131,7 @@ static void dispc_ovl_set_color_mode(enum omap_plane_id plane, u32 fourcc)
}
}
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
}
static bool format_is_yuv(u32 fourcc)
@@ -1073,19 +1146,21 @@ static bool format_is_yuv(u32 fourcc)
}
}
-static void dispc_ovl_configure_burst_type(enum omap_plane_id plane,
- enum omap_dss_rotation_type rotation_type)
+static void dispc_ovl_configure_burst_type(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_dss_rotation_type rotation)
{
- if (dispc_has_feature(FEAT_BURST_2D) == 0)
+ if (dispc_has_feature(dispc, FEAT_BURST_2D) == 0)
return;
- if (rotation_type == OMAP_DSS_ROT_TILER)
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 1, 29, 29);
+ if (rotation == OMAP_DSS_ROT_TILER)
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), 1, 29, 29);
else
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 0, 29, 29);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), 0, 29, 29);
}
-static void dispc_ovl_set_channel_out(enum omap_plane_id plane,
+static void dispc_ovl_set_channel_out(struct dispc_device *dispc,
+ enum omap_plane_id plane,
enum omap_channel channel)
{
int shift;
@@ -1106,8 +1181,8 @@ static void dispc_ovl_set_channel_out(enum omap_plane_id plane,
return;
}
- val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
- if (dispc_has_feature(FEAT_MGR_LCD2)) {
+ val = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD2)) {
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
chan = 0;
@@ -1122,7 +1197,7 @@ static void dispc_ovl_set_channel_out(enum omap_plane_id plane,
chan2 = 1;
break;
case OMAP_DSS_CHANNEL_LCD3:
- if (dispc_has_feature(FEAT_MGR_LCD3)) {
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD3)) {
chan = 0;
chan2 = 2;
} else {
@@ -1144,10 +1219,11 @@ static void dispc_ovl_set_channel_out(enum omap_plane_id plane,
} else {
val = FLD_MOD(val, channel, shift, shift);
}
- dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), val);
}
-static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane_id plane)
+static enum omap_channel dispc_ovl_get_channel_out(struct dispc_device *dispc,
+ enum omap_plane_id plane)
{
int shift;
u32 val;
@@ -1166,12 +1242,12 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane_id plane)
return 0;
}
- val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
+ val = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
if (FLD_GET(val, shift, shift) == 1)
return OMAP_DSS_CHANNEL_DIGIT;
- if (!dispc_has_feature(FEAT_MGR_LCD2))
+ if (!dispc_has_feature(dispc, FEAT_MGR_LCD2))
return OMAP_DSS_CHANNEL_LCD;
switch (FLD_GET(val, 31, 30)) {
@@ -1187,47 +1263,44 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane_id plane)
}
}
-void dispc_wb_set_channel_in(enum dss_writeback_channel channel)
-{
- enum omap_plane_id plane = OMAP_DSS_WB;
-
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), channel, 18, 16);
-}
-
-static void dispc_ovl_set_burst_size(enum omap_plane_id plane,
- enum omap_burst_size burst_size)
+static void dispc_ovl_set_burst_size(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_burst_size burst_size)
{
- static const unsigned shifts[] = { 6, 14, 14, 14, 14, };
+ static const unsigned int shifts[] = { 6, 14, 14, 14, 14, };
int shift;
shift = shifts[plane];
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), burst_size, shift + 1, shift);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), burst_size,
+ shift + 1, shift);
}
-static void dispc_configure_burst_sizes(void)
+static void dispc_configure_burst_sizes(struct dispc_device *dispc)
{
int i;
const int burst_size = BURST_SIZE_X8;
/* Configure burst size always to maximum size */
- for (i = 0; i < dispc_get_num_ovls(); ++i)
- dispc_ovl_set_burst_size(i, burst_size);
- if (dispc.feat->has_writeback)
- dispc_ovl_set_burst_size(OMAP_DSS_WB, burst_size);
+ for (i = 0; i < dispc_get_num_ovls(dispc); ++i)
+ dispc_ovl_set_burst_size(dispc, i, burst_size);
+ if (dispc->feat->has_writeback)
+ dispc_ovl_set_burst_size(dispc, OMAP_DSS_WB, burst_size);
}
-static u32 dispc_ovl_get_burst_size(enum omap_plane_id plane)
+static u32 dispc_ovl_get_burst_size(struct dispc_device *dispc,
+ enum omap_plane_id plane)
{
/* burst multiplier is always x8 (see dispc_configure_burst_sizes()) */
- return dispc.feat->burst_size_unit * 8;
+ return dispc->feat->burst_size_unit * 8;
}
-static bool dispc_ovl_color_mode_supported(enum omap_plane_id plane, u32 fourcc)
+static bool dispc_ovl_color_mode_supported(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 fourcc)
{
const u32 *modes;
unsigned int i;
- modes = dispc.feat->supported_color_modes[plane];
+ modes = dispc->feat->supported_color_modes[plane];
for (i = 0; modes[i]; ++i) {
if (modes[i] == fourcc)
@@ -1237,21 +1310,24 @@ static bool dispc_ovl_color_mode_supported(enum omap_plane_id plane, u32 fourcc)
return false;
}
-static const u32 *dispc_ovl_get_color_modes(enum omap_plane_id plane)
+static const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc,
+ enum omap_plane_id plane)
{
- return dispc.feat->supported_color_modes[plane];
+ return dispc->feat->supported_color_modes[plane];
}
-static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
+static void dispc_mgr_enable_cpr(struct dispc_device *dispc,
+ enum omap_channel channel, bool enable)
{
if (channel == OMAP_DSS_CHANNEL_DIGIT)
return;
- mgr_fld_write(channel, DISPC_MGR_FLD_CPR, enable);
+ mgr_fld_write(dispc, channel, DISPC_MGR_FLD_CPR, enable);
}
-static void dispc_mgr_set_cpr_coef(enum omap_channel channel,
- const struct omap_dss_cpr_coefs *coefs)
+static void dispc_mgr_set_cpr_coef(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct omap_dss_cpr_coefs *coefs)
{
u32 coef_r, coef_g, coef_b;
@@ -1265,48 +1341,50 @@ static void dispc_mgr_set_cpr_coef(enum omap_channel channel,
coef_b = FLD_VAL(coefs->br, 31, 22) | FLD_VAL(coefs->bg, 20, 11) |
FLD_VAL(coefs->bb, 9, 0);
- dispc_write_reg(DISPC_CPR_COEF_R(channel), coef_r);
- dispc_write_reg(DISPC_CPR_COEF_G(channel), coef_g);
- dispc_write_reg(DISPC_CPR_COEF_B(channel), coef_b);
+ dispc_write_reg(dispc, DISPC_CPR_COEF_R(channel), coef_r);
+ dispc_write_reg(dispc, DISPC_CPR_COEF_G(channel), coef_g);
+ dispc_write_reg(dispc, DISPC_CPR_COEF_B(channel), coef_b);
}
-static void dispc_ovl_set_vid_color_conv(enum omap_plane_id plane,
- bool enable)
+static void dispc_ovl_set_vid_color_conv(struct dispc_device *dispc,
+ enum omap_plane_id plane, bool enable)
{
u32 val;
BUG_ON(plane == OMAP_DSS_GFX);
- val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
+ val = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
val = FLD_MOD(val, enable, 9, 9);
- dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), val);
}
-static void dispc_ovl_enable_replication(enum omap_plane_id plane,
- enum omap_overlay_caps caps, bool enable)
+static void dispc_ovl_enable_replication(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_overlay_caps caps,
+ bool enable)
{
- static const unsigned shifts[] = { 5, 10, 10, 10 };
+ static const unsigned int shifts[] = { 5, 10, 10, 10 };
int shift;
if ((caps & OMAP_DSS_OVL_CAP_REPLICATION) == 0)
return;
shift = shifts[plane];
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift);
}
-static void dispc_mgr_set_size(enum omap_channel channel, u16 width,
- u16 height)
+static void dispc_mgr_set_size(struct dispc_device *dispc,
+ enum omap_channel channel, u16 width, u16 height)
{
u32 val;
- val = FLD_VAL(height - 1, dispc.feat->mgr_height_start, 16) |
- FLD_VAL(width - 1, dispc.feat->mgr_width_start, 0);
+ val = FLD_VAL(height - 1, dispc->feat->mgr_height_start, 16) |
+ FLD_VAL(width - 1, dispc->feat->mgr_width_start, 0);
- dispc_write_reg(DISPC_SIZE_MGR(channel), val);
+ dispc_write_reg(dispc, DISPC_SIZE_MGR(channel), val);
}
-static void dispc_init_fifos(void)
+static void dispc_init_fifos(struct dispc_device *dispc)
{
u32 size;
int fifo;
@@ -1314,20 +1392,21 @@ static void dispc_init_fifos(void)
u32 unit;
int i;
- unit = dispc.feat->buffer_size_unit;
+ unit = dispc->feat->buffer_size_unit;
- dispc_get_reg_field(FEAT_REG_FIFOSIZE, &start, &end);
+ dispc_get_reg_field(dispc, FEAT_REG_FIFOSIZE, &start, &end);
- for (fifo = 0; fifo < dispc.feat->num_fifos; ++fifo) {
- size = REG_GET(DISPC_OVL_FIFO_SIZE_STATUS(fifo), start, end);
+ for (fifo = 0; fifo < dispc->feat->num_fifos; ++fifo) {
+ size = REG_GET(dispc, DISPC_OVL_FIFO_SIZE_STATUS(fifo),
+ start, end);
size *= unit;
- dispc.fifo_size[fifo] = size;
+ dispc->fifo_size[fifo] = size;
/*
* By default fifos are mapped directly to overlays, fifo 0 to
* ovl 0, fifo 1 to ovl 1, etc.
*/
- dispc.fifo_assignment[fifo] = fifo;
+ dispc->fifo_assignment[fifo] = fifo;
}
/*
@@ -1337,68 +1416,71 @@ static void dispc_init_fifos(void)
* giving GFX plane a larger fifo. WB should still work fine with a
* smaller fifo.
*/
- if (dispc.feat->gfx_fifo_workaround) {
+ if (dispc->feat->gfx_fifo_workaround) {
u32 v;
- v = dispc_read_reg(DISPC_GLOBAL_BUFFER);
+ v = dispc_read_reg(dispc, DISPC_GLOBAL_BUFFER);
v = FLD_MOD(v, 4, 2, 0); /* GFX BUF top to WB */
v = FLD_MOD(v, 4, 5, 3); /* GFX BUF bottom to WB */
v = FLD_MOD(v, 0, 26, 24); /* WB BUF top to GFX */
v = FLD_MOD(v, 0, 29, 27); /* WB BUF bottom to GFX */
- dispc_write_reg(DISPC_GLOBAL_BUFFER, v);
+ dispc_write_reg(dispc, DISPC_GLOBAL_BUFFER, v);
- dispc.fifo_assignment[OMAP_DSS_GFX] = OMAP_DSS_WB;
- dispc.fifo_assignment[OMAP_DSS_WB] = OMAP_DSS_GFX;
+ dispc->fifo_assignment[OMAP_DSS_GFX] = OMAP_DSS_WB;
+ dispc->fifo_assignment[OMAP_DSS_WB] = OMAP_DSS_GFX;
}
/*
* Setup default fifo thresholds.
*/
- for (i = 0; i < dispc_get_num_ovls(); ++i) {
+ for (i = 0; i < dispc_get_num_ovls(dispc); ++i) {
u32 low, high;
const bool use_fifomerge = false;
const bool manual_update = false;
- dispc_ovl_compute_fifo_thresholds(i, &low, &high,
- use_fifomerge, manual_update);
+ dispc_ovl_compute_fifo_thresholds(dispc, i, &low, &high,
+ use_fifomerge, manual_update);
- dispc_ovl_set_fifo_threshold(i, low, high);
+ dispc_ovl_set_fifo_threshold(dispc, i, low, high);
}
- if (dispc.feat->has_writeback) {
+ if (dispc->feat->has_writeback) {
u32 low, high;
const bool use_fifomerge = false;
const bool manual_update = false;
- dispc_ovl_compute_fifo_thresholds(OMAP_DSS_WB, &low, &high,
- use_fifomerge, manual_update);
+ dispc_ovl_compute_fifo_thresholds(dispc, OMAP_DSS_WB,
+ &low, &high, use_fifomerge,
+ manual_update);
- dispc_ovl_set_fifo_threshold(OMAP_DSS_WB, low, high);
+ dispc_ovl_set_fifo_threshold(dispc, OMAP_DSS_WB, low, high);
}
}
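
Below is a self-contained sketch of the fifo bookkeeping set up here: every hardware fifo records its size and the plane it is assigned to, and a plane's effective fifo size is the sum over all fifos mapped to it (as dispc_ovl_get_fifo_size() does just after this). The fifo count and byte sizes are invented for the example.

#include <stdio.h>
#include <stdint.h>

#define NUM_FIFOS	5
#define PLANE_GFX	0
#define PLANE_WB	4

static uint32_t fifo_size[NUM_FIFOS];
static int fifo_assignment[NUM_FIFOS];

/* Sum the sizes of all fifos currently assigned to 'plane'. */
static uint32_t plane_fifo_size(int plane)
{
	uint32_t size = 0;
	int fifo;

	for (fifo = 0; fifo < NUM_FIFOS; fifo++)
		if (fifo_assignment[fifo] == plane)
			size += fifo_size[fifo];

	return size;
}

int main(void)
{
	int fifo;

	/* Default 1:1 mapping, with invented sizes in bytes. */
	for (fifo = 0; fifo < NUM_FIFOS; fifo++) {
		fifo_size[fifo] = (fifo == PLANE_GFX) ? 1024 : 4096;
		fifo_assignment[fifo] = fifo;
	}

	/* The gfx_fifo_workaround swaps the small GFX fifo with the WB fifo. */
	fifo_assignment[PLANE_GFX] = PLANE_WB;
	fifo_assignment[PLANE_WB] = PLANE_GFX;

	printf("GFX gets %u bytes, WB gets %u bytes\n",
	       plane_fifo_size(PLANE_GFX), plane_fifo_size(PLANE_WB));
	return 0;
}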
-static u32 dispc_ovl_get_fifo_size(enum omap_plane_id plane)
+static u32 dispc_ovl_get_fifo_size(struct dispc_device *dispc,
+ enum omap_plane_id plane)
{
int fifo;
u32 size = 0;
- for (fifo = 0; fifo < dispc.feat->num_fifos; ++fifo) {
- if (dispc.fifo_assignment[fifo] == plane)
- size += dispc.fifo_size[fifo];
+ for (fifo = 0; fifo < dispc->feat->num_fifos; ++fifo) {
+ if (dispc->fifo_assignment[fifo] == plane)
+ size += dispc->fifo_size[fifo];
}
return size;
}
-void dispc_ovl_set_fifo_threshold(enum omap_plane_id plane, u32 low,
- u32 high)
+void dispc_ovl_set_fifo_threshold(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u32 low, u32 high)
{
u8 hi_start, hi_end, lo_start, lo_end;
u32 unit;
- unit = dispc.feat->buffer_size_unit;
+ unit = dispc->feat->buffer_size_unit;
WARN_ON(low % unit != 0);
WARN_ON(high % unit != 0);
@@ -1406,18 +1488,20 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane_id plane, u32 low,
low /= unit;
high /= unit;
- dispc_get_reg_field(FEAT_REG_FIFOHIGHTHRESHOLD, &hi_start, &hi_end);
- dispc_get_reg_field(FEAT_REG_FIFOLOWTHRESHOLD, &lo_start, &lo_end);
+ dispc_get_reg_field(dispc, FEAT_REG_FIFOHIGHTHRESHOLD,
+ &hi_start, &hi_end);
+ dispc_get_reg_field(dispc, FEAT_REG_FIFOLOWTHRESHOLD,
+ &lo_start, &lo_end);
DSSDBG("fifo(%d) threshold (bytes), old %u/%u, new %u/%u\n",
plane,
- REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane),
+ REG_GET(dispc, DISPC_OVL_FIFO_THRESHOLD(plane),
lo_start, lo_end) * unit,
- REG_GET(DISPC_OVL_FIFO_THRESHOLD(plane),
+ REG_GET(dispc, DISPC_OVL_FIFO_THRESHOLD(plane),
hi_start, hi_end) * unit,
low * unit, high * unit);
- dispc_write_reg(DISPC_OVL_FIFO_THRESHOLD(plane),
+ dispc_write_reg(dispc, DISPC_OVL_FIFO_THRESHOLD(plane),
FLD_VAL(high, hi_start, hi_end) |
FLD_VAL(low, lo_start, lo_end));
@@ -1426,42 +1510,43 @@ void dispc_ovl_set_fifo_threshold(enum omap_plane_id plane, u32 low,
* large for the preload field, set the threshold to the maximum value
* that can be held by the preload register
*/
- if (dispc_has_feature(FEAT_PRELOAD) && dispc.feat->set_max_preload &&
- plane != OMAP_DSS_WB)
- dispc_write_reg(DISPC_OVL_PRELOAD(plane), min(high, 0xfffu));
+ if (dispc_has_feature(dispc, FEAT_PRELOAD) &&
+ dispc->feat->set_max_preload && plane != OMAP_DSS_WB)
+ dispc_write_reg(dispc, DISPC_OVL_PRELOAD(plane),
+ min(high, 0xfffu));
}
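
The threshold write itself reduces to unit scaling plus a two-field register pack. The sketch below assumes a 16-byte buffer unit and high/low fields at bits 31:16 and 15:0; in the driver both the unit and the field positions come from the feature data via dispc_get_reg_field().

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

static uint32_t fld_val(uint32_t val, int start, int end)
{
	uint32_t mask = ((1u << (start - end + 1)) - 1) << end;

	return (val << end) & mask;
}

/* Assumed layout: high threshold in bits 31:16, low threshold in bits 15:0. */
static uint32_t pack_fifo_threshold(uint32_t low, uint32_t high, uint32_t unit)
{
	/* Thresholds are given in bytes and must be unit aligned. */
	assert(low % unit == 0 && high % unit == 0);

	return fld_val(high / unit, 31, 16) | fld_val(low / unit, 15, 0);
}

int main(void)
{
	/* Invented example: 4096-byte fifo, marks at 5/8 and 7/8 of the size. */
	uint32_t unit = 16;
	uint32_t low = 4096 * 5 / 8, high = 4096 * 7 / 8;

	printf("FIFO_THRESHOLD = 0x%08x\n", pack_fifo_threshold(low, high, unit));
	return 0;
}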
-void dispc_enable_fifomerge(bool enable)
+void dispc_enable_fifomerge(struct dispc_device *dispc, bool enable)
{
- if (!dispc_has_feature(FEAT_FIFO_MERGE)) {
+ if (!dispc_has_feature(dispc, FEAT_FIFO_MERGE)) {
WARN_ON(enable);
return;
}
DSSDBG("FIFO merge %s\n", enable ? "enabled" : "disabled");
- REG_FLD_MOD(DISPC_CONFIG, enable ? 1 : 0, 14, 14);
+ REG_FLD_MOD(dispc, DISPC_CONFIG, enable ? 1 : 0, 14, 14);
}
-void dispc_ovl_compute_fifo_thresholds(enum omap_plane_id plane,
- u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
- bool manual_update)
+void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u32 *fifo_low, u32 *fifo_high,
+ bool use_fifomerge, bool manual_update)
{
/*
* All sizes are in bytes. Both the buffer and burst are made of
* buffer_units, and the fifo thresholds must be buffer_unit aligned.
*/
-
- unsigned buf_unit = dispc.feat->buffer_size_unit;
- unsigned ovl_fifo_size, total_fifo_size, burst_size;
+ unsigned int buf_unit = dispc->feat->buffer_size_unit;
+ unsigned int ovl_fifo_size, total_fifo_size, burst_size;
int i;
- burst_size = dispc_ovl_get_burst_size(plane);
- ovl_fifo_size = dispc_ovl_get_fifo_size(plane);
+ burst_size = dispc_ovl_get_burst_size(dispc, plane);
+ ovl_fifo_size = dispc_ovl_get_fifo_size(dispc, plane);
if (use_fifomerge) {
total_fifo_size = 0;
- for (i = 0; i < dispc_get_num_ovls(); ++i)
- total_fifo_size += dispc_ovl_get_fifo_size(i);
+ for (i = 0; i < dispc_get_num_ovls(dispc); ++i)
+ total_fifo_size += dispc_ovl_get_fifo_size(dispc, i);
} else {
total_fifo_size = ovl_fifo_size;
}
@@ -1472,7 +1557,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane_id plane,
* combined fifo size
*/
- if (manual_update && dispc_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
+ if (manual_update && dispc_has_feature(dispc, FEAT_OMAP3_DSI_FIFO_BUG)) {
*fifo_low = ovl_fifo_size - burst_size * 2;
*fifo_high = total_fifo_size - burst_size;
} else if (plane == OMAP_DSS_WB) {
@@ -1489,7 +1574,8 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane_id plane,
}
}
-static void dispc_ovl_set_mflag(enum omap_plane_id plane, bool enable)
+static void dispc_ovl_set_mflag(struct dispc_device *dispc,
+ enum omap_plane_id plane, bool enable)
{
int bit;
@@ -1498,17 +1584,18 @@ static void dispc_ovl_set_mflag(enum omap_plane_id plane, bool enable)
else
bit = 23;
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), enable, bit, bit);
}
-static void dispc_ovl_set_mflag_threshold(enum omap_plane_id plane,
- int low, int high)
+static void dispc_ovl_set_mflag_threshold(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ int low, int high)
{
- dispc_write_reg(DISPC_OVL_MFLAG_THRESHOLD(plane),
+ dispc_write_reg(dispc, DISPC_OVL_MFLAG_THRESHOLD(plane),
FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
}
-static void dispc_init_mflag(void)
+static void dispc_init_mflag(struct dispc_device *dispc)
{
int i;
@@ -1522,16 +1609,16 @@ static void dispc_init_mflag(void)
*
* As a work-around, set force MFLAG to always on.
*/
- dispc_write_reg(DISPC_GLOBAL_MFLAG_ATTRIBUTE,
+ dispc_write_reg(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE,
(1 << 0) | /* MFLAG_CTRL = force always on */
(0 << 2)); /* MFLAG_START = disable */
- for (i = 0; i < dispc_get_num_ovls(); ++i) {
- u32 size = dispc_ovl_get_fifo_size(i);
- u32 unit = dispc.feat->buffer_size_unit;
+ for (i = 0; i < dispc_get_num_ovls(dispc); ++i) {
+ u32 size = dispc_ovl_get_fifo_size(dispc, i);
+ u32 unit = dispc->feat->buffer_size_unit;
u32 low, high;
- dispc_ovl_set_mflag(i, true);
+ dispc_ovl_set_mflag(dispc, i, true);
/*
* Simulation team suggests the thresholds below:
@@ -1542,15 +1629,15 @@ static void dispc_init_mflag(void)
low = size * 4 / 8 / unit;
high = size * 5 / 8 / unit;
- dispc_ovl_set_mflag_threshold(i, low, high);
+ dispc_ovl_set_mflag_threshold(dispc, i, low, high);
}
- if (dispc.feat->has_writeback) {
- u32 size = dispc_ovl_get_fifo_size(OMAP_DSS_WB);
- u32 unit = dispc.feat->buffer_size_unit;
+ if (dispc->feat->has_writeback) {
+ u32 size = dispc_ovl_get_fifo_size(dispc, OMAP_DSS_WB);
+ u32 unit = dispc->feat->buffer_size_unit;
u32 low, high;
- dispc_ovl_set_mflag(OMAP_DSS_WB, true);
+ dispc_ovl_set_mflag(dispc, OMAP_DSS_WB, true);
/*
* Simulation team suggests the thresholds below:
@@ -1561,98 +1648,112 @@ static void dispc_init_mflag(void)
low = size * 4 / 8 / unit;
high = size * 5 / 8 / unit;
- dispc_ovl_set_mflag_threshold(OMAP_DSS_WB, low, high);
+ dispc_ovl_set_mflag_threshold(dispc, OMAP_DSS_WB, low, high);
}
}
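
The MFLAG setup above follows a simple rule: the low watermark is 4/8 and the high watermark 5/8 of the plane's fifo size, expressed in buffer-size units. A small sketch with invented fifo size and unit:

#include <stdio.h>
#include <stdint.h>

/* HT = fifosize * 5 / 8, LT = fifosize * 4 / 8, both in buffer units. */
static void mflag_thresholds(uint32_t size, uint32_t unit,
			     uint32_t *low, uint32_t *high)
{
	*low = size * 4 / 8 / unit;
	*high = size * 5 / 8 / unit;
}

int main(void)
{
	uint32_t low, high;

	/* Invented example: 4096-byte fifo, 16-byte buffer unit. */
	mflag_thresholds(4096, 16, &low, &high);
	printf("MFLAG low = %u, high = %u (units)\n", low, high);
	return 0;
}

The pair is then packed into DISPC_OVL_MFLAG_THRESHOLD with the high value in bits 31:16 and the low value in bits 15:0, as dispc_ovl_set_mflag_threshold() above shows.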
-static void dispc_ovl_set_fir(enum omap_plane_id plane,
- int hinc, int vinc,
- enum omap_color_component color_comp)
+static void dispc_ovl_set_fir(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ int hinc, int vinc,
+ enum omap_color_component color_comp)
{
u32 val;
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
u8 hinc_start, hinc_end, vinc_start, vinc_end;
- dispc_get_reg_field(FEAT_REG_FIRHINC, &hinc_start, &hinc_end);
- dispc_get_reg_field(FEAT_REG_FIRVINC, &vinc_start, &vinc_end);
+ dispc_get_reg_field(dispc, FEAT_REG_FIRHINC,
+ &hinc_start, &hinc_end);
+ dispc_get_reg_field(dispc, FEAT_REG_FIRVINC,
+ &vinc_start, &vinc_end);
val = FLD_VAL(vinc, vinc_start, vinc_end) |
FLD_VAL(hinc, hinc_start, hinc_end);
- dispc_write_reg(DISPC_OVL_FIR(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_FIR(plane), val);
} else {
val = FLD_VAL(vinc, 28, 16) | FLD_VAL(hinc, 12, 0);
- dispc_write_reg(DISPC_OVL_FIR2(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_FIR2(plane), val);
}
}
-static void dispc_ovl_set_vid_accu0(enum omap_plane_id plane, int haccu,
+static void dispc_ovl_set_vid_accu0(struct dispc_device *dispc,
+ enum omap_plane_id plane, int haccu,
int vaccu)
{
u32 val;
u8 hor_start, hor_end, vert_start, vert_end;
- dispc_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
- dispc_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
+ dispc_get_reg_field(dispc, FEAT_REG_HORIZONTALACCU,
+ &hor_start, &hor_end);
+ dispc_get_reg_field(dispc, FEAT_REG_VERTICALACCU,
+ &vert_start, &vert_end);
val = FLD_VAL(vaccu, vert_start, vert_end) |
FLD_VAL(haccu, hor_start, hor_end);
- dispc_write_reg(DISPC_OVL_ACCU0(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_ACCU0(plane), val);
}
-static void dispc_ovl_set_vid_accu1(enum omap_plane_id plane, int haccu,
+static void dispc_ovl_set_vid_accu1(struct dispc_device *dispc,
+ enum omap_plane_id plane, int haccu,
int vaccu)
{
u32 val;
u8 hor_start, hor_end, vert_start, vert_end;
- dispc_get_reg_field(FEAT_REG_HORIZONTALACCU, &hor_start, &hor_end);
- dispc_get_reg_field(FEAT_REG_VERTICALACCU, &vert_start, &vert_end);
+ dispc_get_reg_field(dispc, FEAT_REG_HORIZONTALACCU,
+ &hor_start, &hor_end);
+ dispc_get_reg_field(dispc, FEAT_REG_VERTICALACCU,
+ &vert_start, &vert_end);
val = FLD_VAL(vaccu, vert_start, vert_end) |
FLD_VAL(haccu, hor_start, hor_end);
- dispc_write_reg(DISPC_OVL_ACCU1(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_ACCU1(plane), val);
}
-static void dispc_ovl_set_vid_accu2_0(enum omap_plane_id plane, int haccu,
- int vaccu)
+static void dispc_ovl_set_vid_accu2_0(struct dispc_device *dispc,
+ enum omap_plane_id plane, int haccu,
+ int vaccu)
{
u32 val;
val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
- dispc_write_reg(DISPC_OVL_ACCU2_0(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_ACCU2_0(plane), val);
}
-static void dispc_ovl_set_vid_accu2_1(enum omap_plane_id plane, int haccu,
- int vaccu)
+static void dispc_ovl_set_vid_accu2_1(struct dispc_device *dispc,
+ enum omap_plane_id plane, int haccu,
+ int vaccu)
{
u32 val;
val = FLD_VAL(vaccu, 26, 16) | FLD_VAL(haccu, 10, 0);
- dispc_write_reg(DISPC_OVL_ACCU2_1(plane), val);
+ dispc_write_reg(dispc, DISPC_OVL_ACCU2_1(plane), val);
}
-static void dispc_ovl_set_scale_param(enum omap_plane_id plane,
- u16 orig_width, u16 orig_height,
- u16 out_width, u16 out_height,
- bool five_taps, u8 rotation,
- enum omap_color_component color_comp)
+static void dispc_ovl_set_scale_param(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u16 orig_width, u16 orig_height,
+ u16 out_width, u16 out_height,
+ bool five_taps, u8 rotation,
+ enum omap_color_component color_comp)
{
int fir_hinc, fir_vinc;
fir_hinc = 1024 * orig_width / out_width;
fir_vinc = 1024 * orig_height / out_height;
- dispc_ovl_set_scale_coef(plane, fir_hinc, fir_vinc, five_taps,
- color_comp);
- dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp);
+ dispc_ovl_set_scale_coef(dispc, plane, fir_hinc, fir_vinc, five_taps,
+ color_comp);
+ dispc_ovl_set_fir(dispc, plane, fir_hinc, fir_vinc, color_comp);
}
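
The FIR values programmed here are fixed-point steps in 1/1024ths of an input pixel per output pixel: 1024 means no scaling, larger values downscale, smaller values upscale. A quick illustration with arbitrary sizes:

#include <stdio.h>

int main(void)
{
	/* fir_inc = 1024 * input_size / output_size */
	static const int cases[][2] = {
		{ 1920, 1920 },	/* 1:1     -> 1024 */
		{ 1920,  960 },	/* 2x down -> 2048 */
		{  640, 1280 },	/* 2x up   ->  512 */
	};
	unsigned int i;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("in %4d -> out %4d: fir_inc = %d\n",
		       cases[i][0], cases[i][1],
		       1024 * cases[i][0] / cases[i][1]);

	return 0;
}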
-static void dispc_ovl_set_accu_uv(enum omap_plane_id plane,
- u16 orig_width, u16 orig_height, u16 out_width, u16 out_height,
- bool ilace, u32 fourcc, u8 rotation)
+static void dispc_ovl_set_accu_uv(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u16 orig_width, u16 orig_height,
+ u16 out_width, u16 out_height,
+ bool ilace, u32 fourcc, u8 rotation)
{
int h_accu2_0, h_accu2_1;
int v_accu2_0, v_accu2_1;
@@ -1733,25 +1834,26 @@ static void dispc_ovl_set_accu_uv(enum omap_plane_id plane,
v_accu2_0 = (accu_val->v0_m * chroma_vinc / accu_val->v0_n) % 1024;
v_accu2_1 = (accu_val->v1_m * chroma_vinc / accu_val->v1_n) % 1024;
- dispc_ovl_set_vid_accu2_0(plane, h_accu2_0, v_accu2_0);
- dispc_ovl_set_vid_accu2_1(plane, h_accu2_1, v_accu2_1);
+ dispc_ovl_set_vid_accu2_0(dispc, plane, h_accu2_0, v_accu2_0);
+ dispc_ovl_set_vid_accu2_1(dispc, plane, h_accu2_1, v_accu2_1);
}
-static void dispc_ovl_set_scaling_common(enum omap_plane_id plane,
- u16 orig_width, u16 orig_height,
- u16 out_width, u16 out_height,
- bool ilace, bool five_taps,
- bool fieldmode, u32 fourcc,
- u8 rotation)
+static void dispc_ovl_set_scaling_common(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u16 orig_width, u16 orig_height,
+ u16 out_width, u16 out_height,
+ bool ilace, bool five_taps,
+ bool fieldmode, u32 fourcc,
+ u8 rotation)
{
int accu0 = 0;
int accu1 = 0;
u32 l;
- dispc_ovl_set_scale_param(plane, orig_width, orig_height,
- out_width, out_height, five_taps,
- rotation, DISPC_COLOR_COMPONENT_RGB_Y);
- l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
+ dispc_ovl_set_scale_param(dispc, plane, orig_width, orig_height,
+ out_width, out_height, five_taps,
+ rotation, DISPC_COLOR_COMPONENT_RGB_Y);
+ l = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
/* RESIZEENABLE and VERTICALTAPS */
l &= ~((0x3 << 5) | (0x1 << 21));
@@ -1760,19 +1862,19 @@ static void dispc_ovl_set_scaling_common(enum omap_plane_id plane,
l |= five_taps ? (1 << 21) : 0;
/* VRESIZECONF and HRESIZECONF */
- if (dispc_has_feature(FEAT_RESIZECONF)) {
+ if (dispc_has_feature(dispc, FEAT_RESIZECONF)) {
l &= ~(0x3 << 7);
l |= (orig_width <= out_width) ? 0 : (1 << 7);
l |= (orig_height <= out_height) ? 0 : (1 << 8);
}
/* LINEBUFFERSPLIT */
- if (dispc_has_feature(FEAT_LINEBUFFERSPLIT)) {
+ if (dispc_has_feature(dispc, FEAT_LINEBUFFERSPLIT)) {
l &= ~(0x1 << 22);
l |= five_taps ? (1 << 22) : 0;
}
- dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l);
+ dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), l);
/*
* field 0 = even field = bottom field
@@ -1787,33 +1889,35 @@ static void dispc_ovl_set_scaling_common(enum omap_plane_id plane,
}
}
- dispc_ovl_set_vid_accu0(plane, 0, accu0);
- dispc_ovl_set_vid_accu1(plane, 0, accu1);
+ dispc_ovl_set_vid_accu0(dispc, plane, 0, accu0);
+ dispc_ovl_set_vid_accu1(dispc, plane, 0, accu1);
}
-static void dispc_ovl_set_scaling_uv(enum omap_plane_id plane,
- u16 orig_width, u16 orig_height,
- u16 out_width, u16 out_height,
- bool ilace, bool five_taps,
- bool fieldmode, u32 fourcc,
- u8 rotation)
+static void dispc_ovl_set_scaling_uv(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u16 orig_width, u16 orig_height,
+ u16 out_width, u16 out_height,
+ bool ilace, bool five_taps,
+ bool fieldmode, u32 fourcc,
+ u8 rotation)
{
int scale_x = out_width != orig_width;
int scale_y = out_height != orig_height;
bool chroma_upscale = plane != OMAP_DSS_WB;
- if (!dispc_has_feature(FEAT_HANDLE_UV_SEPARATE))
+ if (!dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE))
return;
if (!format_is_yuv(fourcc)) {
/* reset chroma resampling for RGB formats */
if (plane != OMAP_DSS_WB)
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane),
+ 0, 8, 8);
return;
}
- dispc_ovl_set_accu_uv(plane, orig_width, orig_height, out_width,
- out_height, ilace, fourcc, rotation);
+ dispc_ovl_set_accu_uv(dispc, plane, orig_width, orig_height, out_width,
+ out_height, ilace, fourcc, rotation);
switch (fourcc) {
case DRM_FORMAT_NV12:
@@ -1855,46 +1959,43 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane_id plane,
if (out_height != orig_height)
scale_y = true;
- dispc_ovl_set_scale_param(plane, orig_width, orig_height,
- out_width, out_height, five_taps,
- rotation, DISPC_COLOR_COMPONENT_UV);
+ dispc_ovl_set_scale_param(dispc, plane, orig_width, orig_height,
+ out_width, out_height, five_taps,
+ rotation, DISPC_COLOR_COMPONENT_UV);
if (plane != OMAP_DSS_WB)
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane),
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane),
(scale_x || scale_y) ? 1 : 0, 8, 8);
/* set H scaling */
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
/* set V scaling */
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);
}
-static void dispc_ovl_set_scaling(enum omap_plane_id plane,
- u16 orig_width, u16 orig_height,
- u16 out_width, u16 out_height,
- bool ilace, bool five_taps,
- bool fieldmode, u32 fourcc,
- u8 rotation)
+static void dispc_ovl_set_scaling(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u16 orig_width, u16 orig_height,
+ u16 out_width, u16 out_height,
+ bool ilace, bool five_taps,
+ bool fieldmode, u32 fourcc,
+ u8 rotation)
{
BUG_ON(plane == OMAP_DSS_GFX);
- dispc_ovl_set_scaling_common(plane,
- orig_width, orig_height,
- out_width, out_height,
- ilace, five_taps,
- fieldmode, fourcc,
- rotation);
+ dispc_ovl_set_scaling_common(dispc, plane, orig_width, orig_height,
+ out_width, out_height, ilace, five_taps,
+ fieldmode, fourcc, rotation);
- dispc_ovl_set_scaling_uv(plane,
- orig_width, orig_height,
- out_width, out_height,
- ilace, five_taps,
- fieldmode, fourcc,
- rotation);
+ dispc_ovl_set_scaling_uv(dispc, plane, orig_width, orig_height,
+ out_width, out_height, ilace, five_taps,
+ fieldmode, fourcc, rotation);
}
-static void dispc_ovl_set_rotation_attrs(enum omap_plane_id plane, u8 rotation,
- enum omap_dss_rotation_type rotation_type, u32 fourcc)
+static void dispc_ovl_set_rotation_attrs(struct dispc_device *dispc,
+ enum omap_plane_id plane, u8 rotation,
+ enum omap_dss_rotation_type rotation_type,
+ u32 fourcc)
{
bool row_repeat = false;
int vidrot = 0;
@@ -1948,19 +2049,20 @@ static void dispc_ovl_set_rotation_attrs(enum omap_plane_id plane, u8 rotation,
if (fourcc == DRM_FORMAT_NV12 && rotation_type != OMAP_DSS_ROT_TILER)
vidrot = 1;
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), vidrot, 13, 12);
- if (dispc_has_feature(FEAT_ROWREPEATENABLE))
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane),
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), vidrot, 13, 12);
+ if (dispc_has_feature(dispc, FEAT_ROWREPEATENABLE))
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane),
row_repeat ? 1 : 0, 18, 18);
- if (dispc_ovl_color_mode_supported(plane, DRM_FORMAT_NV12)) {
+ if (dispc_ovl_color_mode_supported(dispc, plane, DRM_FORMAT_NV12)) {
bool doublestride =
fourcc == DRM_FORMAT_NV12 &&
rotation_type == OMAP_DSS_ROT_TILER &&
!drm_rotation_90_or_270(rotation);
/* DOUBLESTRIDE */
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), doublestride, 22, 22);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane),
+ doublestride, 22, 22);
}
}
@@ -2006,8 +2108,8 @@ static s32 pixinc(int pixels, u8 ps)
}
static void calc_offset(u16 screen_width, u16 width,
- u32 fourcc, bool fieldmode,
- unsigned int field_offset, unsigned *offset0, unsigned *offset1,
+ u32 fourcc, bool fieldmode, unsigned int field_offset,
+ unsigned int *offset0, unsigned int *offset1,
s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim,
enum omap_dss_rotation_type rotation_type, u8 rotation)
{
@@ -2197,27 +2299,31 @@ static unsigned long calc_core_clk_44xx(unsigned long pclk, u16 width,
return pclk;
}
-static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
- const struct videomode *vm,
- u16 width, u16 height, u16 out_width, u16 out_height,
- u32 fourcc, bool *five_taps,
- int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
- u16 pos_x, unsigned long *core_clk, bool mem_to_mem)
+static int dispc_ovl_calc_scaling_24xx(struct dispc_device *dispc,
+ unsigned long pclk, unsigned long lclk,
+ const struct videomode *vm,
+ u16 width, u16 height,
+ u16 out_width, u16 out_height,
+ u32 fourcc, bool *five_taps,
+ int *x_predecim, int *y_predecim,
+ int *decim_x, int *decim_y,
+ u16 pos_x, unsigned long *core_clk,
+ bool mem_to_mem)
{
int error;
u16 in_width, in_height;
int min_factor = min(*decim_x, *decim_y);
- const int maxsinglelinewidth = dispc.feat->max_line_width;
+ const int maxsinglelinewidth = dispc->feat->max_line_width;
*five_taps = false;
do {
in_height = height / *decim_y;
in_width = width / *decim_x;
- *core_clk = dispc.feat->calc_core_clk(pclk, in_width,
+ *core_clk = dispc->feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height, mem_to_mem);
error = (in_width > maxsinglelinewidth || !*core_clk ||
- *core_clk > dispc_core_clk_rate());
+ *core_clk > dispc_core_clk_rate(dispc));
if (error) {
if (*decim_x == *decim_y) {
*decim_x = min_factor;
@@ -2242,16 +2348,20 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
return 0;
}
-static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
- const struct videomode *vm,
- u16 width, u16 height, u16 out_width, u16 out_height,
- u32 fourcc, bool *five_taps,
- int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
- u16 pos_x, unsigned long *core_clk, bool mem_to_mem)
+static int dispc_ovl_calc_scaling_34xx(struct dispc_device *dispc,
+ unsigned long pclk, unsigned long lclk,
+ const struct videomode *vm,
+ u16 width, u16 height,
+ u16 out_width, u16 out_height,
+ u32 fourcc, bool *five_taps,
+ int *x_predecim, int *y_predecim,
+ int *decim_x, int *decim_y,
+ u16 pos_x, unsigned long *core_clk,
+ bool mem_to_mem)
{
int error;
u16 in_width, in_height;
- const int maxsinglelinewidth = dispc.feat->max_line_width;
+ const int maxsinglelinewidth = dispc->feat->max_line_width;
do {
in_height = height / *decim_y;
@@ -2268,7 +2378,7 @@ again:
in_width, in_height, out_width,
out_height, fourcc);
else
- *core_clk = dispc.feat->calc_core_clk(pclk, in_width,
+ *core_clk = dispc->feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height,
mem_to_mem);
@@ -2282,7 +2392,7 @@ again:
error = (error || in_width > maxsinglelinewidth * 2 ||
(in_width > maxsinglelinewidth && *five_taps) ||
- !*core_clk || *core_clk > dispc_core_clk_rate());
+ !*core_clk || *core_clk > dispc_core_clk_rate(dispc));
if (!error) {
/* verify that we're inside the limits of scaler */
@@ -2326,24 +2436,28 @@ again:
return 0;
}
-static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
- const struct videomode *vm,
- u16 width, u16 height, u16 out_width, u16 out_height,
- u32 fourcc, bool *five_taps,
- int *x_predecim, int *y_predecim, int *decim_x, int *decim_y,
- u16 pos_x, unsigned long *core_clk, bool mem_to_mem)
+static int dispc_ovl_calc_scaling_44xx(struct dispc_device *dispc,
+ unsigned long pclk, unsigned long lclk,
+ const struct videomode *vm,
+ u16 width, u16 height,
+ u16 out_width, u16 out_height,
+ u32 fourcc, bool *five_taps,
+ int *x_predecim, int *y_predecim,
+ int *decim_x, int *decim_y,
+ u16 pos_x, unsigned long *core_clk,
+ bool mem_to_mem)
{
u16 in_width, in_width_max;
int decim_x_min = *decim_x;
u16 in_height = height / *decim_y;
- const int maxsinglelinewidth = dispc.feat->max_line_width;
- const int maxdownscale = dispc.feat->max_downscale;
+ const int maxsinglelinewidth = dispc->feat->max_line_width;
+ const int maxdownscale = dispc->feat->max_downscale;
if (mem_to_mem) {
in_width_max = out_width * maxdownscale;
} else {
- in_width_max = dispc_core_clk_rate() /
- DIV_ROUND_UP(pclk, out_width);
+ in_width_max = dispc_core_clk_rate(dispc)
+ / DIV_ROUND_UP(pclk, out_width);
}
*decim_x = DIV_ROUND_UP(width, in_width_max);
@@ -2381,7 +2495,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
return -EINVAL;
}
- *core_clk = dispc.feat->calc_core_clk(pclk, in_width, in_height,
+ *core_clk = dispc->feat->calc_core_clk(pclk, in_width, in_height,
out_width, out_height, mem_to_mem);
return 0;
}
@@ -2389,15 +2503,20 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
#define DIV_FRAC(dividend, divisor) \
((dividend) * 100 / (divisor) - ((dividend) / (divisor) * 100))
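
DIV_FRAC() extracts the two-digit fractional part of an integer division so the scaling debug print can show a ratio like "2.66" without floating point. A minimal demonstration of the same expression:

#include <stdio.h>

/* Hundredths left over after integer division, as in the driver macro. */
#define DIV_FRAC(dividend, divisor) \
	((dividend) * 100 / (divisor) - ((dividend) / (divisor) * 100))

int main(void)
{
	int width = 1920, out_width = 720;

	/* Prints "scale x: 2.66" for a 1920 -> 720 downscale. */
	printf("scale x: %d.%02d\n", width / out_width,
	       DIV_FRAC(width, out_width));
	return 0;
}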
-static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
- enum omap_overlay_caps caps,
- const struct videomode *vm,
- u16 width, u16 height, u16 out_width, u16 out_height,
- u32 fourcc, bool *five_taps,
- int *x_predecim, int *y_predecim, u16 pos_x,
- enum omap_dss_rotation_type rotation_type, bool mem_to_mem)
-{
- const int maxdownscale = dispc.feat->max_downscale;
+static int dispc_ovl_calc_scaling(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ unsigned long pclk, unsigned long lclk,
+ enum omap_overlay_caps caps,
+ const struct videomode *vm,
+ u16 width, u16 height,
+ u16 out_width, u16 out_height,
+ u32 fourcc, bool *five_taps,
+ int *x_predecim, int *y_predecim, u16 pos_x,
+ enum omap_dss_rotation_type rotation_type,
+ bool mem_to_mem)
+{
+ int maxhdownscale = dispc->feat->max_downscale;
+ int maxvdownscale = dispc->feat->max_downscale;
const int max_decim_limit = 16;
unsigned long core_clk = 0;
int decim_x, decim_y, ret;
@@ -2405,6 +2524,20 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
if (width == out_width && height == out_height)
return 0;
+ if (plane == OMAP_DSS_WB) {
+ switch (fourcc) {
+ case DRM_FORMAT_NV12:
+ maxhdownscale = maxvdownscale = 2;
+ break;
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ maxhdownscale = 2;
+ maxvdownscale = 4;
+ break;
+ default:
+ break;
+ }
+ }
if (!mem_to_mem && (pclk == 0 || vm->pixelclock == 0)) {
DSSERR("cannot calculate scaling settings: pclk is zero\n");
return -EINVAL;
@@ -2418,12 +2551,12 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
} else {
*x_predecim = max_decim_limit;
*y_predecim = (rotation_type == OMAP_DSS_ROT_TILER &&
- dispc_has_feature(FEAT_BURST_2D)) ?
+ dispc_has_feature(dispc, FEAT_BURST_2D)) ?
2 : max_decim_limit;
}
- decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxdownscale);
- decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxdownscale);
+ decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxhdownscale);
+ decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxvdownscale);
if (decim_x > *x_predecim || out_width > width * 8)
return -EINVAL;
@@ -2431,10 +2564,11 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
if (decim_y > *y_predecim || out_height > height * 8)
return -EINVAL;
- ret = dispc.feat->calc_scaling(pclk, lclk, vm, width, height,
- out_width, out_height, fourcc, five_taps,
- x_predecim, y_predecim, &decim_x, &decim_y, pos_x, &core_clk,
- mem_to_mem);
+ ret = dispc->feat->calc_scaling(dispc, pclk, lclk, vm, width, height,
+ out_width, out_height, fourcc,
+ five_taps, x_predecim, y_predecim,
+ &decim_x, &decim_y, pos_x, &core_clk,
+ mem_to_mem);
if (ret)
return ret;
@@ -2450,13 +2584,13 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
out_height / (height / decim_y), DIV_FRAC(out_height, height / decim_y),
*five_taps ? 5 : 3,
- core_clk, dispc_core_clk_rate());
+ core_clk, dispc_core_clk_rate(dispc));
- if (!core_clk || core_clk > dispc_core_clk_rate()) {
+ if (!core_clk || core_clk > dispc_core_clk_rate(dispc)) {
DSSERR("failed to set up scaling, "
"required core clk rate = %lu Hz, "
"current core clk rate = %lu Hz\n",
- core_clk, dispc_core_clk_rate());
+ core_clk, dispc_core_clk_rate(dispc));
return -EINVAL;
}
@@ -2465,19 +2599,23 @@ static int dispc_ovl_calc_scaling(unsigned long pclk, unsigned long lclk,
return 0;
}
-static int dispc_ovl_setup_common(enum omap_plane_id plane,
- enum omap_overlay_caps caps, u32 paddr, u32 p_uv_addr,
- u16 screen_width, int pos_x, int pos_y, u16 width, u16 height,
- u16 out_width, u16 out_height, u32 fourcc,
- u8 rotation, u8 zorder, u8 pre_mult_alpha,
- u8 global_alpha, enum omap_dss_rotation_type rotation_type,
- bool replication, const struct videomode *vm,
- bool mem_to_mem)
+static int dispc_ovl_setup_common(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ enum omap_overlay_caps caps,
+ u32 paddr, u32 p_uv_addr,
+ u16 screen_width, int pos_x, int pos_y,
+ u16 width, u16 height,
+ u16 out_width, u16 out_height,
+ u32 fourcc, u8 rotation, u8 zorder,
+ u8 pre_mult_alpha, u8 global_alpha,
+ enum omap_dss_rotation_type rotation_type,
+ bool replication, const struct videomode *vm,
+ bool mem_to_mem)
{
bool five_taps = true;
bool fieldmode = false;
int r, cconv = 0;
- unsigned offset0, offset1;
+ unsigned int offset0, offset1;
s32 row_inc;
s32 pix_inc;
u16 frame_width, frame_height;
@@ -2486,8 +2624,12 @@ static int dispc_ovl_setup_common(enum omap_plane_id plane,
u16 in_width = width;
int x_predecim = 1, y_predecim = 1;
bool ilace = !!(vm->flags & DISPLAY_FLAGS_INTERLACED);
- unsigned long pclk = dispc_plane_pclk_rate(plane);
- unsigned long lclk = dispc_plane_lclk_rate(plane);
+ unsigned long pclk = dispc_plane_pclk_rate(dispc, plane);
+ unsigned long lclk = dispc_plane_lclk_rate(dispc, plane);
+
+ /* when setting up WB, dispc_plane_pclk_rate() returns 0 */
+ if (plane == OMAP_DSS_WB)
+ pclk = vm->pixelclock;
if (paddr == 0 && rotation_type != OMAP_DSS_ROT_TILER)
return -EINVAL;
@@ -2500,27 +2642,28 @@ static int dispc_ovl_setup_common(enum omap_plane_id plane,
out_width = out_width == 0 ? width : out_width;
out_height = out_height == 0 ? height : out_height;
- if (ilace && height == out_height)
- fieldmode = true;
+ if (plane != OMAP_DSS_WB) {
+ if (ilace && height == out_height)
+ fieldmode = true;
- if (ilace) {
- if (fieldmode)
- in_height /= 2;
- pos_y /= 2;
- out_height /= 2;
+ if (ilace) {
+ if (fieldmode)
+ in_height /= 2;
+ pos_y /= 2;
+ out_height /= 2;
- DSSDBG("adjusting for ilace: height %d, pos_y %d, "
- "out_height %d\n", in_height, pos_y,
- out_height);
+ DSSDBG("adjusting for ilace: height %d, pos_y %d, out_height %d\n",
+ in_height, pos_y, out_height);
+ }
}
- if (!dispc_ovl_color_mode_supported(plane, fourcc))
+ if (!dispc_ovl_color_mode_supported(dispc, plane, fourcc))
return -EINVAL;
- r = dispc_ovl_calc_scaling(pclk, lclk, caps, vm, in_width,
- in_height, out_width, out_height, fourcc,
- &five_taps, &x_predecim, &y_predecim, pos_x,
- rotation_type, mem_to_mem);
+ r = dispc_ovl_calc_scaling(dispc, plane, pclk, lclk, caps, vm, in_width,
+ in_height, out_width, out_height, fourcc,
+ &five_taps, &x_predecim, &y_predecim, pos_x,
+ rotation_type, mem_to_mem);
if (r)
return r;
@@ -2582,60 +2725,62 @@ static int dispc_ovl_setup_common(enum omap_plane_id plane,
DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
offset0, offset1, row_inc, pix_inc);
- dispc_ovl_set_color_mode(plane, fourcc);
+ dispc_ovl_set_color_mode(dispc, plane, fourcc);
- dispc_ovl_configure_burst_type(plane, rotation_type);
+ dispc_ovl_configure_burst_type(dispc, plane, rotation_type);
- if (dispc.feat->reverse_ilace_field_order)
+ if (dispc->feat->reverse_ilace_field_order)
swap(offset0, offset1);
- dispc_ovl_set_ba0(plane, paddr + offset0);
- dispc_ovl_set_ba1(plane, paddr + offset1);
+ dispc_ovl_set_ba0(dispc, plane, paddr + offset0);
+ dispc_ovl_set_ba1(dispc, plane, paddr + offset1);
if (fourcc == DRM_FORMAT_NV12) {
- dispc_ovl_set_ba0_uv(plane, p_uv_addr + offset0);
- dispc_ovl_set_ba1_uv(plane, p_uv_addr + offset1);
+ dispc_ovl_set_ba0_uv(dispc, plane, p_uv_addr + offset0);
+ dispc_ovl_set_ba1_uv(dispc, plane, p_uv_addr + offset1);
}
- if (dispc.feat->last_pixel_inc_missing)
+ if (dispc->feat->last_pixel_inc_missing)
row_inc += pix_inc - 1;
- dispc_ovl_set_row_inc(plane, row_inc);
- dispc_ovl_set_pix_inc(plane, pix_inc);
+ dispc_ovl_set_row_inc(dispc, plane, row_inc);
+ dispc_ovl_set_pix_inc(dispc, plane, pix_inc);
DSSDBG("%d,%d %dx%d -> %dx%d\n", pos_x, pos_y, in_width,
in_height, out_width, out_height);
- dispc_ovl_set_pos(plane, caps, pos_x, pos_y);
+ dispc_ovl_set_pos(dispc, plane, caps, pos_x, pos_y);
- dispc_ovl_set_input_size(plane, in_width, in_height);
+ dispc_ovl_set_input_size(dispc, plane, in_width, in_height);
if (caps & OMAP_DSS_OVL_CAP_SCALE) {
- dispc_ovl_set_scaling(plane, in_width, in_height, out_width,
- out_height, ilace, five_taps, fieldmode,
- fourcc, rotation);
- dispc_ovl_set_output_size(plane, out_width, out_height);
- dispc_ovl_set_vid_color_conv(plane, cconv);
+ dispc_ovl_set_scaling(dispc, plane, in_width, in_height,
+ out_width, out_height, ilace, five_taps,
+ fieldmode, fourcc, rotation);
+ dispc_ovl_set_output_size(dispc, plane, out_width, out_height);
+ dispc_ovl_set_vid_color_conv(dispc, plane, cconv);
}
- dispc_ovl_set_rotation_attrs(plane, rotation, rotation_type, fourcc);
+ dispc_ovl_set_rotation_attrs(dispc, plane, rotation, rotation_type,
+ fourcc);
- dispc_ovl_set_zorder(plane, caps, zorder);
- dispc_ovl_set_pre_mult_alpha(plane, caps, pre_mult_alpha);
- dispc_ovl_setup_global_alpha(plane, caps, global_alpha);
+ dispc_ovl_set_zorder(dispc, plane, caps, zorder);
+ dispc_ovl_set_pre_mult_alpha(dispc, plane, caps, pre_mult_alpha);
+ dispc_ovl_setup_global_alpha(dispc, plane, caps, global_alpha);
- dispc_ovl_enable_replication(plane, caps, replication);
+ dispc_ovl_enable_replication(dispc, plane, caps, replication);
return 0;
}
-static int dispc_ovl_setup(enum omap_plane_id plane,
- const struct omap_overlay_info *oi,
- const struct videomode *vm, bool mem_to_mem,
- enum omap_channel channel)
+static int dispc_ovl_setup(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ const struct omap_overlay_info *oi,
+ const struct videomode *vm, bool mem_to_mem,
+ enum omap_channel channel)
{
int r;
- enum omap_overlay_caps caps = dispc.feat->overlay_caps[plane];
+ enum omap_overlay_caps caps = dispc->feat->overlay_caps[plane];
const bool replication = true;
DSSDBG("dispc_ovl_setup %d, pa %pad, pa_uv %pad, sw %d, %d,%d, %dx%d ->"
@@ -2644,9 +2789,9 @@ static int dispc_ovl_setup(enum omap_plane_id plane,
oi->pos_y, oi->width, oi->height, oi->out_width, oi->out_height,
oi->fourcc, oi->rotation, channel, replication);
- dispc_ovl_set_channel_out(plane, channel);
+ dispc_ovl_set_channel_out(dispc, plane, channel);
- r = dispc_ovl_setup_common(plane, caps, oi->paddr, oi->p_uv_addr,
+ r = dispc_ovl_setup_common(dispc, plane, caps, oi->paddr, oi->p_uv_addr,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->fourcc, oi->rotation,
oi->zorder, oi->pre_mult_alpha, oi->global_alpha,
@@ -2655,8 +2800,10 @@ static int dispc_ovl_setup(enum omap_plane_id plane,
return r;
}
-int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct videomode *vm)
+static int dispc_wb_setup(struct dispc_device *dispc,
+ const struct omap_dss_writeback_info *wi,
+ bool mem_to_mem, const struct videomode *vm,
+ enum dss_writeback_channel channel_in)
{
int r;
u32 l;
@@ -2670,15 +2817,20 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
enum omap_overlay_caps caps =
OMAP_DSS_OVL_CAP_SCALE | OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA;
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ in_height /= 2;
+
DSSDBG("dispc_wb_setup, pa %x, pa_uv %x, %d,%d -> %dx%d, cmode %x, "
"rot %d\n", wi->paddr, wi->p_uv_addr, in_width,
in_height, wi->width, wi->height, wi->fourcc, wi->rotation);
- r = dispc_ovl_setup_common(plane, caps, wi->paddr, wi->p_uv_addr,
+ r = dispc_ovl_setup_common(dispc, plane, caps, wi->paddr, wi->p_uv_addr,
wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width,
wi->height, wi->fourcc, wi->rotation, zorder,
wi->pre_mult_alpha, global_alpha, wi->rotation_type,
replication, vm, mem_to_mem);
+ if (r)
+ return r;
switch (wi->fourcc) {
case DRM_FORMAT_RGB565:
@@ -2697,132 +2849,162 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
}
/* setup extra DISPC_WB_ATTRIBUTES */
- l = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
+ l = dispc_read_reg(dispc, DISPC_OVL_ATTRIBUTES(plane));
l = FLD_MOD(l, truncation, 10, 10); /* TRUNCATIONENABLE */
+ l = FLD_MOD(l, channel_in, 18, 16); /* CHANNELIN */
l = FLD_MOD(l, mem_to_mem, 19, 19); /* WRITEBACKMODE */
if (mem_to_mem)
l = FLD_MOD(l, 1, 26, 24); /* CAPTUREMODE */
else
l = FLD_MOD(l, 0, 26, 24); /* CAPTUREMODE */
- dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), l);
+ dispc_write_reg(dispc, DISPC_OVL_ATTRIBUTES(plane), l);
if (mem_to_mem) {
/* WBDELAYCOUNT */
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 7, 0);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), 0, 7, 0);
} else {
- int wbdelay;
+ u32 wbdelay;
+
+ if (channel_in == DSS_WB_TV_MGR)
+ wbdelay = vm->vsync_len + vm->vback_porch;
+ else
+ wbdelay = vm->vfront_porch + vm->vsync_len +
+ vm->vback_porch;
+
+ if (vm->flags & DISPLAY_FLAGS_INTERLACED)
+ wbdelay /= 2;
- wbdelay = min(vm->vfront_porch +
- vm->vsync_len + vm->vback_porch, (u32)255);
+ wbdelay = min(wbdelay, 255u);
/* WBDELAYCOUNT */
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES2(plane), wbdelay, 7, 0);
}
- return r;
+ return 0;
+}
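
A sketch of the WBDELAYCOUNT computation introduced above: the delay spans the vertical blanking before writeback starts, drops the front porch when capturing from the TV manager, is halved for interlaced output, and is clamped to the 8-bit register field. The blanking numbers below are arbitrary.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct vm_blanking {
	uint32_t vfront_porch;
	uint32_t vsync_len;
	uint32_t vback_porch;
	bool interlaced;
};

static uint32_t wb_delay_count(const struct vm_blanking *vm, bool tv_manager)
{
	uint32_t wbdelay;

	if (tv_manager)
		wbdelay = vm->vsync_len + vm->vback_porch;
	else
		wbdelay = vm->vfront_porch + vm->vsync_len + vm->vback_porch;

	if (vm->interlaced)
		wbdelay /= 2;

	/* WBDELAYCOUNT is an 8-bit field. */
	return wbdelay < 255 ? wbdelay : 255;
}

int main(void)
{
	/* Arbitrary interlaced blanking: fp 4, sync 5, bp 36. */
	struct vm_blanking vm = { 4, 5, 36, true };

	printf("LCD path: %u lines, TV path: %u lines\n",
	       wb_delay_count(&vm, false), wb_delay_count(&vm, true));
	return 0;
}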
+
+static bool dispc_has_writeback(struct dispc_device *dispc)
+{
+ return dispc->feat->has_writeback;
}
-static int dispc_ovl_enable(enum omap_plane_id plane, bool enable)
+static int dispc_ovl_enable(struct dispc_device *dispc,
+ enum omap_plane_id plane, bool enable)
{
DSSDBG("dispc_enable_plane %d, %d\n", plane, enable);
- REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0);
+ REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), enable ? 1 : 0, 0, 0);
return 0;
}
-static enum omap_dss_output_id dispc_mgr_get_supported_outputs(enum omap_channel channel)
+static enum omap_dss_output_id
+dispc_mgr_get_supported_outputs(struct dispc_device *dispc,
+ enum omap_channel channel)
{
- return dss_get_supported_outputs(channel);
+ return dss_get_supported_outputs(dispc->dss, channel);
}
-static void dispc_lcd_enable_signal_polarity(bool act_high)
+static void dispc_lcd_enable_signal_polarity(struct dispc_device *dispc,
+ bool act_high)
{
- if (!dispc_has_feature(FEAT_LCDENABLEPOL))
+ if (!dispc_has_feature(dispc, FEAT_LCDENABLEPOL))
return;
- REG_FLD_MOD(DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
+ REG_FLD_MOD(dispc, DISPC_CONTROL, act_high ? 1 : 0, 29, 29);
}
-void dispc_lcd_enable_signal(bool enable)
+void dispc_lcd_enable_signal(struct dispc_device *dispc, bool enable)
{
- if (!dispc_has_feature(FEAT_LCDENABLESIGNAL))
+ if (!dispc_has_feature(dispc, FEAT_LCDENABLESIGNAL))
return;
- REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 28, 28);
+ REG_FLD_MOD(dispc, DISPC_CONTROL, enable ? 1 : 0, 28, 28);
}
-void dispc_pck_free_enable(bool enable)
+void dispc_pck_free_enable(struct dispc_device *dispc, bool enable)
{
- if (!dispc_has_feature(FEAT_PCKFREEENABLE))
+ if (!dispc_has_feature(dispc, FEAT_PCKFREEENABLE))
return;
- REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 27, 27);
+ REG_FLD_MOD(dispc, DISPC_CONTROL, enable ? 1 : 0, 27, 27);
}
-static void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable)
+static void dispc_mgr_enable_fifohandcheck(struct dispc_device *dispc,
+ enum omap_channel channel,
+ bool enable)
{
- mgr_fld_write(channel, DISPC_MGR_FLD_FIFOHANDCHECK, enable);
+ mgr_fld_write(dispc, channel, DISPC_MGR_FLD_FIFOHANDCHECK, enable);
}
-static void dispc_mgr_set_lcd_type_tft(enum omap_channel channel)
+static void dispc_mgr_set_lcd_type_tft(struct dispc_device *dispc,
+ enum omap_channel channel)
{
- mgr_fld_write(channel, DISPC_MGR_FLD_STNTFT, 1);
+ mgr_fld_write(dispc, channel, DISPC_MGR_FLD_STNTFT, 1);
}
-static void dispc_set_loadmode(enum omap_dss_load_mode mode)
+static void dispc_set_loadmode(struct dispc_device *dispc,
+ enum omap_dss_load_mode mode)
{
- REG_FLD_MOD(DISPC_CONFIG, mode, 2, 1);
+ REG_FLD_MOD(dispc, DISPC_CONFIG, mode, 2, 1);
}
-static void dispc_mgr_set_default_color(enum omap_channel channel, u32 color)
+static void dispc_mgr_set_default_color(struct dispc_device *dispc,
+ enum omap_channel channel, u32 color)
{
- dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color);
+ dispc_write_reg(dispc, DISPC_DEFAULT_COLOR(channel), color);
}
-static void dispc_mgr_set_trans_key(enum omap_channel ch,
- enum omap_dss_trans_key_type type,
- u32 trans_key)
+static void dispc_mgr_set_trans_key(struct dispc_device *dispc,
+ enum omap_channel ch,
+ enum omap_dss_trans_key_type type,
+ u32 trans_key)
{
- mgr_fld_write(ch, DISPC_MGR_FLD_TCKSELECTION, type);
+ mgr_fld_write(dispc, ch, DISPC_MGR_FLD_TCKSELECTION, type);
- dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key);
+ dispc_write_reg(dispc, DISPC_TRANS_COLOR(ch), trans_key);
}
-static void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable)
+static void dispc_mgr_enable_trans_key(struct dispc_device *dispc,
+ enum omap_channel ch, bool enable)
{
- mgr_fld_write(ch, DISPC_MGR_FLD_TCKENABLE, enable);
+ mgr_fld_write(dispc, ch, DISPC_MGR_FLD_TCKENABLE, enable);
}
-static void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch,
- bool enable)
+static void dispc_mgr_enable_alpha_fixed_zorder(struct dispc_device *dispc,
+ enum omap_channel ch,
+ bool enable)
{
- if (!dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER))
+ if (!dispc_has_feature(dispc, FEAT_ALPHA_FIXED_ZORDER))
return;
if (ch == OMAP_DSS_CHANNEL_LCD)
- REG_FLD_MOD(DISPC_CONFIG, enable, 18, 18);
+ REG_FLD_MOD(dispc, DISPC_CONFIG, enable, 18, 18);
else if (ch == OMAP_DSS_CHANNEL_DIGIT)
- REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
+ REG_FLD_MOD(dispc, DISPC_CONFIG, enable, 19, 19);
}
-static void dispc_mgr_setup(enum omap_channel channel,
- const struct omap_overlay_manager_info *info)
+static void dispc_mgr_setup(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct omap_overlay_manager_info *info)
{
- dispc_mgr_set_default_color(channel, info->default_color);
- dispc_mgr_set_trans_key(channel, info->trans_key_type, info->trans_key);
- dispc_mgr_enable_trans_key(channel, info->trans_enabled);
- dispc_mgr_enable_alpha_fixed_zorder(channel,
+ dispc_mgr_set_default_color(dispc, channel, info->default_color);
+ dispc_mgr_set_trans_key(dispc, channel, info->trans_key_type,
+ info->trans_key);
+ dispc_mgr_enable_trans_key(dispc, channel, info->trans_enabled);
+ dispc_mgr_enable_alpha_fixed_zorder(dispc, channel,
info->partial_alpha_enabled);
- if (dispc_has_feature(FEAT_CPR)) {
- dispc_mgr_enable_cpr(channel, info->cpr_enable);
- dispc_mgr_set_cpr_coef(channel, &info->cpr_coefs);
+ if (dispc_has_feature(dispc, FEAT_CPR)) {
+ dispc_mgr_enable_cpr(dispc, channel, info->cpr_enable);
+ dispc_mgr_set_cpr_coef(dispc, channel, &info->cpr_coefs);
}
}
-static void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
+static void dispc_mgr_set_tft_data_lines(struct dispc_device *dispc,
+ enum omap_channel channel,
+ u8 data_lines)
{
int code;
@@ -2844,10 +3026,11 @@ static void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_line
return;
}
- mgr_fld_write(channel, DISPC_MGR_FLD_TFTDATALINES, code);
+ mgr_fld_write(dispc, channel, DISPC_MGR_FLD_TFTDATALINES, code);
}
-static void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode)
+static void dispc_mgr_set_io_pad_mode(struct dispc_device *dispc,
+ enum dss_io_pad_mode mode)
{
u32 l;
int gpout0, gpout1;
@@ -2870,68 +3053,74 @@ static void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode)
return;
}
- l = dispc_read_reg(DISPC_CONTROL);
+ l = dispc_read_reg(dispc, DISPC_CONTROL);
l = FLD_MOD(l, gpout0, 15, 15);
l = FLD_MOD(l, gpout1, 16, 16);
- dispc_write_reg(DISPC_CONTROL, l);
+ dispc_write_reg(dispc, DISPC_CONTROL, l);
}
-static void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable)
+static void dispc_mgr_enable_stallmode(struct dispc_device *dispc,
+ enum omap_channel channel, bool enable)
{
- mgr_fld_write(channel, DISPC_MGR_FLD_STALLMODE, enable);
+ mgr_fld_write(dispc, channel, DISPC_MGR_FLD_STALLMODE, enable);
}
-static void dispc_mgr_set_lcd_config(enum omap_channel channel,
- const struct dss_lcd_mgr_config *config)
+static void dispc_mgr_set_lcd_config(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config)
{
- dispc_mgr_set_io_pad_mode(config->io_pad_mode);
+ dispc_mgr_set_io_pad_mode(dispc, config->io_pad_mode);
- dispc_mgr_enable_stallmode(channel, config->stallmode);
- dispc_mgr_enable_fifohandcheck(channel, config->fifohandcheck);
+ dispc_mgr_enable_stallmode(dispc, channel, config->stallmode);
+ dispc_mgr_enable_fifohandcheck(dispc, channel, config->fifohandcheck);
- dispc_mgr_set_clock_div(channel, &config->clock_info);
+ dispc_mgr_set_clock_div(dispc, channel, &config->clock_info);
- dispc_mgr_set_tft_data_lines(channel, config->video_port_width);
+ dispc_mgr_set_tft_data_lines(dispc, channel, config->video_port_width);
- dispc_lcd_enable_signal_polarity(config->lcden_sig_polarity);
+ dispc_lcd_enable_signal_polarity(dispc, config->lcden_sig_polarity);
- dispc_mgr_set_lcd_type_tft(channel);
+ dispc_mgr_set_lcd_type_tft(dispc, channel);
}
-static bool _dispc_mgr_size_ok(u16 width, u16 height)
+static bool _dispc_mgr_size_ok(struct dispc_device *dispc,
+ u16 width, u16 height)
{
- return width <= dispc.feat->mgr_width_max &&
- height <= dispc.feat->mgr_height_max;
+ return width <= dispc->feat->mgr_width_max &&
+ height <= dispc->feat->mgr_height_max;
}
-static bool _dispc_lcd_timings_ok(int hsync_len, int hfp, int hbp,
- int vsw, int vfp, int vbp)
+static bool _dispc_lcd_timings_ok(struct dispc_device *dispc,
+ int hsync_len, int hfp, int hbp,
+ int vsw, int vfp, int vbp)
{
- if (hsync_len < 1 || hsync_len > dispc.feat->sw_max ||
- hfp < 1 || hfp > dispc.feat->hp_max ||
- hbp < 1 || hbp > dispc.feat->hp_max ||
- vsw < 1 || vsw > dispc.feat->sw_max ||
- vfp < 0 || vfp > dispc.feat->vp_max ||
- vbp < 0 || vbp > dispc.feat->vp_max)
+ if (hsync_len < 1 || hsync_len > dispc->feat->sw_max ||
+ hfp < 1 || hfp > dispc->feat->hp_max ||
+ hbp < 1 || hbp > dispc->feat->hp_max ||
+ vsw < 1 || vsw > dispc->feat->sw_max ||
+ vfp < 0 || vfp > dispc->feat->vp_max ||
+ vbp < 0 || vbp > dispc->feat->vp_max)
return false;
return true;
}
-static bool _dispc_mgr_pclk_ok(enum omap_channel channel,
- unsigned long pclk)
+static bool _dispc_mgr_pclk_ok(struct dispc_device *dispc,
+ enum omap_channel channel,
+ unsigned long pclk)
{
if (dss_mgr_is_lcd(channel))
- return pclk <= dispc.feat->max_lcd_pclk;
+ return pclk <= dispc->feat->max_lcd_pclk;
else
- return pclk <= dispc.feat->max_tv_pclk;
+ return pclk <= dispc->feat->max_tv_pclk;
}
-bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm)
+bool dispc_mgr_timings_ok(struct dispc_device *dispc, enum omap_channel channel,
+ const struct videomode *vm)
{
- if (!_dispc_mgr_size_ok(vm->hactive, vm->vactive))
+ if (!_dispc_mgr_size_ok(dispc, vm->hactive, vm->vactive))
return false;
- if (!_dispc_mgr_pclk_ok(channel, vm->pixelclock))
+ if (!_dispc_mgr_pclk_ok(dispc, channel, vm->pixelclock))
return false;
if (dss_mgr_is_lcd(channel)) {
@@ -2939,7 +3128,7 @@ bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm)
if (vm->flags & DISPLAY_FLAGS_INTERLACED)
return false;
- if (!_dispc_lcd_timings_ok(vm->hsync_len,
+ if (!_dispc_lcd_timings_ok(dispc, vm->hsync_len,
vm->hfront_porch, vm->hback_porch,
vm->vsync_len, vm->vfront_porch,
vm->vback_porch))
@@ -2949,21 +3138,22 @@ bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm)
return true;
}
-static void _dispc_mgr_set_lcd_timings(enum omap_channel channel,
+static void _dispc_mgr_set_lcd_timings(struct dispc_device *dispc,
+ enum omap_channel channel,
const struct videomode *vm)
{
u32 timing_h, timing_v, l;
bool onoff, rf, ipc, vs, hs, de;
- timing_h = FLD_VAL(vm->hsync_len - 1, dispc.feat->sw_start, 0) |
- FLD_VAL(vm->hfront_porch - 1, dispc.feat->fp_start, 8) |
- FLD_VAL(vm->hback_porch - 1, dispc.feat->bp_start, 20);
- timing_v = FLD_VAL(vm->vsync_len - 1, dispc.feat->sw_start, 0) |
- FLD_VAL(vm->vfront_porch, dispc.feat->fp_start, 8) |
- FLD_VAL(vm->vback_porch, dispc.feat->bp_start, 20);
+ timing_h = FLD_VAL(vm->hsync_len - 1, dispc->feat->sw_start, 0) |
+ FLD_VAL(vm->hfront_porch - 1, dispc->feat->fp_start, 8) |
+ FLD_VAL(vm->hback_porch - 1, dispc->feat->bp_start, 20);
+ timing_v = FLD_VAL(vm->vsync_len - 1, dispc->feat->sw_start, 0) |
+ FLD_VAL(vm->vfront_porch, dispc->feat->fp_start, 8) |
+ FLD_VAL(vm->vback_porch, dispc->feat->bp_start, 20);
- dispc_write_reg(DISPC_TIMING_H(channel), timing_h);
- dispc_write_reg(DISPC_TIMING_V(channel), timing_v);
+ dispc_write_reg(dispc, DISPC_TIMING_H(channel), timing_h);
+ dispc_write_reg(dispc, DISPC_TIMING_V(channel), timing_v);
if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
vs = false;
@@ -3001,12 +3191,12 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel,
FLD_VAL(vs, 12, 12);
/* always set ALIGN bit when available */
- if (dispc.feat->supports_sync_align)
+ if (dispc->feat->supports_sync_align)
l |= (1 << 18);
- dispc_write_reg(DISPC_POL_FREQ(channel), l);
+ dispc_write_reg(dispc, DISPC_POL_FREQ(channel), l);
- if (dispc.syscon_pol) {
+ if (dispc->syscon_pol) {
const int shifts[] = {
[OMAP_DSS_CHANNEL_LCD] = 0,
[OMAP_DSS_CHANNEL_LCD2] = 1,
@@ -3021,8 +3211,8 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel,
mask <<= 16 + shifts[channel];
val <<= 16 + shifts[channel];
- regmap_update_bits(dispc.syscon_pol, dispc.syscon_pol_offset,
- mask, val);
+ regmap_update_bits(dispc->syscon_pol, dispc->syscon_pol_offset,
+ mask, val);
}
}
@@ -3037,22 +3227,23 @@ static int vm_flag_to_int(enum display_flags flags, enum display_flags high,
}
/* change name to mode? */
-static void dispc_mgr_set_timings(enum omap_channel channel,
- const struct videomode *vm)
+static void dispc_mgr_set_timings(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm)
{
- unsigned xtot, ytot;
+ unsigned int xtot, ytot;
unsigned long ht, vt;
struct videomode t = *vm;
DSSDBG("channel %d xres %u yres %u\n", channel, t.hactive, t.vactive);
- if (!dispc_mgr_timings_ok(channel, &t)) {
+ if (!dispc_mgr_timings_ok(dispc, channel, &t)) {
BUG();
return;
}
if (dss_mgr_is_lcd(channel)) {
- _dispc_mgr_set_lcd_timings(channel, &t);
+ _dispc_mgr_set_lcd_timings(dispc, channel, &t);
xtot = t.hactive + t.hfront_porch + t.hsync_len + t.hback_porch;
ytot = t.vactive + t.vfront_porch + t.vsync_len + t.vback_porch;
@@ -3076,52 +3267,54 @@ static void dispc_mgr_set_timings(enum omap_channel channel,
if (t.flags & DISPLAY_FLAGS_INTERLACED)
t.vactive /= 2;
- if (dispc.feat->supports_double_pixel)
- REG_FLD_MOD(DISPC_CONTROL,
+ if (dispc->feat->supports_double_pixel)
+ REG_FLD_MOD(dispc, DISPC_CONTROL,
!!(t.flags & DISPLAY_FLAGS_DOUBLECLK),
19, 17);
}
- dispc_mgr_set_size(channel, t.hactive, t.vactive);
+ dispc_mgr_set_size(dispc, channel, t.hactive, t.vactive);
}
-static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
- u16 pck_div)
+static void dispc_mgr_set_lcd_divisor(struct dispc_device *dispc,
+ enum omap_channel channel, u16 lck_div,
+ u16 pck_div)
{
BUG_ON(lck_div < 1);
BUG_ON(pck_div < 1);
- dispc_write_reg(DISPC_DIVISORo(channel),
+ dispc_write_reg(dispc, DISPC_DIVISORo(channel),
FLD_VAL(lck_div, 23, 16) | FLD_VAL(pck_div, 7, 0));
- if (!dispc_has_feature(FEAT_CORE_CLK_DIV) &&
+ if (!dispc_has_feature(dispc, FEAT_CORE_CLK_DIV) &&
channel == OMAP_DSS_CHANNEL_LCD)
- dispc.core_clk_rate = dispc_fclk_rate() / lck_div;
+ dispc->core_clk_rate = dispc_fclk_rate(dispc) / lck_div;
}
-static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div,
- int *pck_div)
+static void dispc_mgr_get_lcd_divisor(struct dispc_device *dispc,
+ enum omap_channel channel, int *lck_div,
+ int *pck_div)
{
u32 l;
- l = dispc_read_reg(DISPC_DIVISORo(channel));
+ l = dispc_read_reg(dispc, DISPC_DIVISORo(channel));
*lck_div = FLD_GET(l, 23, 16);
*pck_div = FLD_GET(l, 7, 0);
}
-static unsigned long dispc_fclk_rate(void)
+static unsigned long dispc_fclk_rate(struct dispc_device *dispc)
{
unsigned long r;
enum dss_clk_source src;
- src = dss_get_dispc_clk_source();
+ src = dss_get_dispc_clk_source(dispc->dss);
if (src == DSS_CLK_SRC_FCK) {
- r = dss_get_dispc_clk_rate();
+ r = dss_get_dispc_clk_rate(dispc->dss);
} else {
struct dss_pll *pll;
- unsigned clkout_idx;
+ unsigned int clkout_idx;
- pll = dss_pll_find_by_src(src);
+ pll = dss_pll_find_by_src(dispc->dss, src);
clkout_idx = dss_pll_get_clkout_idx_for_src(src);
r = pll->cinfo.clkout[clkout_idx];
@@ -3130,7 +3323,8 @@ static unsigned long dispc_fclk_rate(void)
return r;
}
-static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
+static unsigned long dispc_mgr_lclk_rate(struct dispc_device *dispc,
+ enum omap_channel channel)
{
int lcd;
unsigned long r;
@@ -3138,28 +3332,29 @@ static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
/* for TV, LCLK rate is the FCLK rate */
if (!dss_mgr_is_lcd(channel))
- return dispc_fclk_rate();
+ return dispc_fclk_rate(dispc);
- src = dss_get_lcd_clk_source(channel);
+ src = dss_get_lcd_clk_source(dispc->dss, channel);
if (src == DSS_CLK_SRC_FCK) {
- r = dss_get_dispc_clk_rate();
+ r = dss_get_dispc_clk_rate(dispc->dss);
} else {
struct dss_pll *pll;
- unsigned clkout_idx;
+ unsigned int clkout_idx;
- pll = dss_pll_find_by_src(src);
+ pll = dss_pll_find_by_src(dispc->dss, src);
clkout_idx = dss_pll_get_clkout_idx_for_src(src);
r = pll->cinfo.clkout[clkout_idx];
}
- lcd = REG_GET(DISPC_DIVISORo(channel), 23, 16);
+ lcd = REG_GET(dispc, DISPC_DIVISORo(channel), 23, 16);
return r / lcd;
}
-static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
+static unsigned long dispc_mgr_pclk_rate(struct dispc_device *dispc,
+ enum omap_channel channel)
{
unsigned long r;
@@ -3167,109 +3362,115 @@ static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
int pcd;
u32 l;
- l = dispc_read_reg(DISPC_DIVISORo(channel));
+ l = dispc_read_reg(dispc, DISPC_DIVISORo(channel));
pcd = FLD_GET(l, 7, 0);
- r = dispc_mgr_lclk_rate(channel);
+ r = dispc_mgr_lclk_rate(dispc, channel);
return r / pcd;
} else {
- return dispc.tv_pclk_rate;
+ return dispc->tv_pclk_rate;
}
}
-void dispc_set_tv_pclk(unsigned long pclk)
+void dispc_set_tv_pclk(struct dispc_device *dispc, unsigned long pclk)
{
- dispc.tv_pclk_rate = pclk;
+ dispc->tv_pclk_rate = pclk;
}
-static unsigned long dispc_core_clk_rate(void)
+static unsigned long dispc_core_clk_rate(struct dispc_device *dispc)
{
- return dispc.core_clk_rate;
+ return dispc->core_clk_rate;
}
-static unsigned long dispc_plane_pclk_rate(enum omap_plane_id plane)
+static unsigned long dispc_plane_pclk_rate(struct dispc_device *dispc,
+ enum omap_plane_id plane)
{
enum omap_channel channel;
if (plane == OMAP_DSS_WB)
return 0;
- channel = dispc_ovl_get_channel_out(plane);
+ channel = dispc_ovl_get_channel_out(dispc, plane);
- return dispc_mgr_pclk_rate(channel);
+ return dispc_mgr_pclk_rate(dispc, channel);
}
-static unsigned long dispc_plane_lclk_rate(enum omap_plane_id plane)
+static unsigned long dispc_plane_lclk_rate(struct dispc_device *dispc,
+ enum omap_plane_id plane)
{
enum omap_channel channel;
if (plane == OMAP_DSS_WB)
return 0;
- channel = dispc_ovl_get_channel_out(plane);
+ channel = dispc_ovl_get_channel_out(dispc, plane);
- return dispc_mgr_lclk_rate(channel);
+ return dispc_mgr_lclk_rate(dispc, channel);
}
-static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel)
+static void dispc_dump_clocks_channel(struct dispc_device *dispc,
+ struct seq_file *s,
+ enum omap_channel channel)
{
int lcd, pcd;
enum dss_clk_source lcd_clk_src;
seq_printf(s, "- %s -\n", mgr_desc[channel].name);
- lcd_clk_src = dss_get_lcd_clk_source(channel);
+ lcd_clk_src = dss_get_lcd_clk_source(dispc->dss, channel);
seq_printf(s, "%s clk source = %s\n", mgr_desc[channel].name,
dss_get_clk_source_name(lcd_clk_src));
- dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd);
+ dispc_mgr_get_lcd_divisor(dispc, channel, &lcd, &pcd);
seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
- dispc_mgr_lclk_rate(channel), lcd);
+ dispc_mgr_lclk_rate(dispc, channel), lcd);
seq_printf(s, "pck\t\t%-16lupck div\t%u\n",
- dispc_mgr_pclk_rate(channel), pcd);
+ dispc_mgr_pclk_rate(dispc, channel), pcd);
}
-void dispc_dump_clocks(struct seq_file *s)
+void dispc_dump_clocks(struct dispc_device *dispc, struct seq_file *s)
{
+ enum dss_clk_source dispc_clk_src;
int lcd;
u32 l;
- enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
- if (dispc_runtime_get())
+ if (dispc_runtime_get(dispc))
return;
seq_printf(s, "- DISPC -\n");
+ dispc_clk_src = dss_get_dispc_clk_source(dispc->dss);
seq_printf(s, "dispc fclk source = %s\n",
dss_get_clk_source_name(dispc_clk_src));
- seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
+ seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate(dispc));
- if (dispc_has_feature(FEAT_CORE_CLK_DIV)) {
+ if (dispc_has_feature(dispc, FEAT_CORE_CLK_DIV)) {
seq_printf(s, "- DISPC-CORE-CLK -\n");
- l = dispc_read_reg(DISPC_DIVISOR);
+ l = dispc_read_reg(dispc, DISPC_DIVISOR);
lcd = FLD_GET(l, 23, 16);
seq_printf(s, "lck\t\t%-16lulck div\t%u\n",
- (dispc_fclk_rate()/lcd), lcd);
+ (dispc_fclk_rate(dispc)/lcd), lcd);
}
- dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD);
+ dispc_dump_clocks_channel(dispc, s, OMAP_DSS_CHANNEL_LCD);
- if (dispc_has_feature(FEAT_MGR_LCD2))
- dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD2);
- if (dispc_has_feature(FEAT_MGR_LCD3))
- dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD3);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD2))
+ dispc_dump_clocks_channel(dispc, s, OMAP_DSS_CHANNEL_LCD2);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD3))
+ dispc_dump_clocks_channel(dispc, s, OMAP_DSS_CHANNEL_LCD3);
- dispc_runtime_put();
+ dispc_runtime_put(dispc);
}
-static void dispc_dump_regs(struct seq_file *s)
+static int dispc_dump_regs(struct seq_file *s, void *p)
{
+ struct dispc_device *dispc = s->private;
int i, j;
const char *mgr_names[] = {
[OMAP_DSS_CHANNEL_LCD] = "LCD",
@@ -3286,186 +3487,190 @@ static void dispc_dump_regs(struct seq_file *s)
};
const char **p_names;
-#define DUMPREG(r) seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(r))
+#define DUMPREG(dispc, r) \
+ seq_printf(s, "%-50s %08x\n", #r, dispc_read_reg(dispc, r))
- if (dispc_runtime_get())
- return;
+ if (dispc_runtime_get(dispc))
+ return 0;
/* DISPC common registers */
- DUMPREG(DISPC_REVISION);
- DUMPREG(DISPC_SYSCONFIG);
- DUMPREG(DISPC_SYSSTATUS);
- DUMPREG(DISPC_IRQSTATUS);
- DUMPREG(DISPC_IRQENABLE);
- DUMPREG(DISPC_CONTROL);
- DUMPREG(DISPC_CONFIG);
- DUMPREG(DISPC_CAPABLE);
- DUMPREG(DISPC_LINE_STATUS);
- DUMPREG(DISPC_LINE_NUMBER);
- if (dispc_has_feature(FEAT_ALPHA_FIXED_ZORDER) ||
- dispc_has_feature(FEAT_ALPHA_FREE_ZORDER))
- DUMPREG(DISPC_GLOBAL_ALPHA);
- if (dispc_has_feature(FEAT_MGR_LCD2)) {
- DUMPREG(DISPC_CONTROL2);
- DUMPREG(DISPC_CONFIG2);
+ DUMPREG(dispc, DISPC_REVISION);
+ DUMPREG(dispc, DISPC_SYSCONFIG);
+ DUMPREG(dispc, DISPC_SYSSTATUS);
+ DUMPREG(dispc, DISPC_IRQSTATUS);
+ DUMPREG(dispc, DISPC_IRQENABLE);
+ DUMPREG(dispc, DISPC_CONTROL);
+ DUMPREG(dispc, DISPC_CONFIG);
+ DUMPREG(dispc, DISPC_CAPABLE);
+ DUMPREG(dispc, DISPC_LINE_STATUS);
+ DUMPREG(dispc, DISPC_LINE_NUMBER);
+ if (dispc_has_feature(dispc, FEAT_ALPHA_FIXED_ZORDER) ||
+ dispc_has_feature(dispc, FEAT_ALPHA_FREE_ZORDER))
+ DUMPREG(dispc, DISPC_GLOBAL_ALPHA);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD2)) {
+ DUMPREG(dispc, DISPC_CONTROL2);
+ DUMPREG(dispc, DISPC_CONFIG2);
}
- if (dispc_has_feature(FEAT_MGR_LCD3)) {
- DUMPREG(DISPC_CONTROL3);
- DUMPREG(DISPC_CONFIG3);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD3)) {
+ DUMPREG(dispc, DISPC_CONTROL3);
+ DUMPREG(dispc, DISPC_CONFIG3);
}
- if (dispc_has_feature(FEAT_MFLAG))
- DUMPREG(DISPC_GLOBAL_MFLAG_ATTRIBUTE);
+ if (dispc_has_feature(dispc, FEAT_MFLAG))
+ DUMPREG(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE);
#undef DUMPREG
#define DISPC_REG(i, name) name(i)
-#define DUMPREG(i, r) seq_printf(s, "%s(%s)%*s %08x\n", #r, p_names[i], \
+#define DUMPREG(dispc, i, r) seq_printf(s, "%s(%s)%*s %08x\n", #r, p_names[i], \
(int)(48 - strlen(#r) - strlen(p_names[i])), " ", \
- dispc_read_reg(DISPC_REG(i, r)))
+ dispc_read_reg(dispc, DISPC_REG(i, r)))
p_names = mgr_names;
/* DISPC channel specific registers */
- for (i = 0; i < dispc_get_num_mgrs(); i++) {
- DUMPREG(i, DISPC_DEFAULT_COLOR);
- DUMPREG(i, DISPC_TRANS_COLOR);
- DUMPREG(i, DISPC_SIZE_MGR);
+ for (i = 0; i < dispc_get_num_mgrs(dispc); i++) {
+ DUMPREG(dispc, i, DISPC_DEFAULT_COLOR);
+ DUMPREG(dispc, i, DISPC_TRANS_COLOR);
+ DUMPREG(dispc, i, DISPC_SIZE_MGR);
if (i == OMAP_DSS_CHANNEL_DIGIT)
continue;
- DUMPREG(i, DISPC_TIMING_H);
- DUMPREG(i, DISPC_TIMING_V);
- DUMPREG(i, DISPC_POL_FREQ);
- DUMPREG(i, DISPC_DIVISORo);
+ DUMPREG(dispc, i, DISPC_TIMING_H);
+ DUMPREG(dispc, i, DISPC_TIMING_V);
+ DUMPREG(dispc, i, DISPC_POL_FREQ);
+ DUMPREG(dispc, i, DISPC_DIVISORo);
- DUMPREG(i, DISPC_DATA_CYCLE1);
- DUMPREG(i, DISPC_DATA_CYCLE2);
- DUMPREG(i, DISPC_DATA_CYCLE3);
+ DUMPREG(dispc, i, DISPC_DATA_CYCLE1);
+ DUMPREG(dispc, i, DISPC_DATA_CYCLE2);
+ DUMPREG(dispc, i, DISPC_DATA_CYCLE3);
- if (dispc_has_feature(FEAT_CPR)) {
- DUMPREG(i, DISPC_CPR_COEF_R);
- DUMPREG(i, DISPC_CPR_COEF_G);
- DUMPREG(i, DISPC_CPR_COEF_B);
+ if (dispc_has_feature(dispc, FEAT_CPR)) {
+ DUMPREG(dispc, i, DISPC_CPR_COEF_R);
+ DUMPREG(dispc, i, DISPC_CPR_COEF_G);
+ DUMPREG(dispc, i, DISPC_CPR_COEF_B);
}
}
p_names = ovl_names;
- for (i = 0; i < dispc_get_num_ovls(); i++) {
- DUMPREG(i, DISPC_OVL_BA0);
- DUMPREG(i, DISPC_OVL_BA1);
- DUMPREG(i, DISPC_OVL_POSITION);
- DUMPREG(i, DISPC_OVL_SIZE);
- DUMPREG(i, DISPC_OVL_ATTRIBUTES);
- DUMPREG(i, DISPC_OVL_FIFO_THRESHOLD);
- DUMPREG(i, DISPC_OVL_FIFO_SIZE_STATUS);
- DUMPREG(i, DISPC_OVL_ROW_INC);
- DUMPREG(i, DISPC_OVL_PIXEL_INC);
-
- if (dispc_has_feature(FEAT_PRELOAD))
- DUMPREG(i, DISPC_OVL_PRELOAD);
- if (dispc_has_feature(FEAT_MFLAG))
- DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
+ for (i = 0; i < dispc_get_num_ovls(dispc); i++) {
+ DUMPREG(dispc, i, DISPC_OVL_BA0);
+ DUMPREG(dispc, i, DISPC_OVL_BA1);
+ DUMPREG(dispc, i, DISPC_OVL_POSITION);
+ DUMPREG(dispc, i, DISPC_OVL_SIZE);
+ DUMPREG(dispc, i, DISPC_OVL_ATTRIBUTES);
+ DUMPREG(dispc, i, DISPC_OVL_FIFO_THRESHOLD);
+ DUMPREG(dispc, i, DISPC_OVL_FIFO_SIZE_STATUS);
+ DUMPREG(dispc, i, DISPC_OVL_ROW_INC);
+ DUMPREG(dispc, i, DISPC_OVL_PIXEL_INC);
+
+ if (dispc_has_feature(dispc, FEAT_PRELOAD))
+ DUMPREG(dispc, i, DISPC_OVL_PRELOAD);
+ if (dispc_has_feature(dispc, FEAT_MFLAG))
+ DUMPREG(dispc, i, DISPC_OVL_MFLAG_THRESHOLD);
if (i == OMAP_DSS_GFX) {
- DUMPREG(i, DISPC_OVL_WINDOW_SKIP);
- DUMPREG(i, DISPC_OVL_TABLE_BA);
+ DUMPREG(dispc, i, DISPC_OVL_WINDOW_SKIP);
+ DUMPREG(dispc, i, DISPC_OVL_TABLE_BA);
continue;
}
- DUMPREG(i, DISPC_OVL_FIR);
- DUMPREG(i, DISPC_OVL_PICTURE_SIZE);
- DUMPREG(i, DISPC_OVL_ACCU0);
- DUMPREG(i, DISPC_OVL_ACCU1);
- if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- DUMPREG(i, DISPC_OVL_BA0_UV);
- DUMPREG(i, DISPC_OVL_BA1_UV);
- DUMPREG(i, DISPC_OVL_FIR2);
- DUMPREG(i, DISPC_OVL_ACCU2_0);
- DUMPREG(i, DISPC_OVL_ACCU2_1);
+ DUMPREG(dispc, i, DISPC_OVL_FIR);
+ DUMPREG(dispc, i, DISPC_OVL_PICTURE_SIZE);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU0);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU1);
+ if (dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE)) {
+ DUMPREG(dispc, i, DISPC_OVL_BA0_UV);
+ DUMPREG(dispc, i, DISPC_OVL_BA1_UV);
+ DUMPREG(dispc, i, DISPC_OVL_FIR2);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU2_0);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU2_1);
}
- if (dispc_has_feature(FEAT_ATTR2))
- DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
+ if (dispc_has_feature(dispc, FEAT_ATTR2))
+ DUMPREG(dispc, i, DISPC_OVL_ATTRIBUTES2);
}
- if (dispc.feat->has_writeback) {
+ if (dispc->feat->has_writeback) {
i = OMAP_DSS_WB;
- DUMPREG(i, DISPC_OVL_BA0);
- DUMPREG(i, DISPC_OVL_BA1);
- DUMPREG(i, DISPC_OVL_SIZE);
- DUMPREG(i, DISPC_OVL_ATTRIBUTES);
- DUMPREG(i, DISPC_OVL_FIFO_THRESHOLD);
- DUMPREG(i, DISPC_OVL_FIFO_SIZE_STATUS);
- DUMPREG(i, DISPC_OVL_ROW_INC);
- DUMPREG(i, DISPC_OVL_PIXEL_INC);
-
- if (dispc_has_feature(FEAT_MFLAG))
- DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
-
- DUMPREG(i, DISPC_OVL_FIR);
- DUMPREG(i, DISPC_OVL_PICTURE_SIZE);
- DUMPREG(i, DISPC_OVL_ACCU0);
- DUMPREG(i, DISPC_OVL_ACCU1);
- if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
- DUMPREG(i, DISPC_OVL_BA0_UV);
- DUMPREG(i, DISPC_OVL_BA1_UV);
- DUMPREG(i, DISPC_OVL_FIR2);
- DUMPREG(i, DISPC_OVL_ACCU2_0);
- DUMPREG(i, DISPC_OVL_ACCU2_1);
+ DUMPREG(dispc, i, DISPC_OVL_BA0);
+ DUMPREG(dispc, i, DISPC_OVL_BA1);
+ DUMPREG(dispc, i, DISPC_OVL_SIZE);
+ DUMPREG(dispc, i, DISPC_OVL_ATTRIBUTES);
+ DUMPREG(dispc, i, DISPC_OVL_FIFO_THRESHOLD);
+ DUMPREG(dispc, i, DISPC_OVL_FIFO_SIZE_STATUS);
+ DUMPREG(dispc, i, DISPC_OVL_ROW_INC);
+ DUMPREG(dispc, i, DISPC_OVL_PIXEL_INC);
+
+ if (dispc_has_feature(dispc, FEAT_MFLAG))
+ DUMPREG(dispc, i, DISPC_OVL_MFLAG_THRESHOLD);
+
+ DUMPREG(dispc, i, DISPC_OVL_FIR);
+ DUMPREG(dispc, i, DISPC_OVL_PICTURE_SIZE);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU0);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU1);
+ if (dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE)) {
+ DUMPREG(dispc, i, DISPC_OVL_BA0_UV);
+ DUMPREG(dispc, i, DISPC_OVL_BA1_UV);
+ DUMPREG(dispc, i, DISPC_OVL_FIR2);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU2_0);
+ DUMPREG(dispc, i, DISPC_OVL_ACCU2_1);
}
- if (dispc_has_feature(FEAT_ATTR2))
- DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
+ if (dispc_has_feature(dispc, FEAT_ATTR2))
+ DUMPREG(dispc, i, DISPC_OVL_ATTRIBUTES2);
}
#undef DISPC_REG
#undef DUMPREG
#define DISPC_REG(plane, name, i) name(plane, i)
-#define DUMPREG(plane, name, i) \
+#define DUMPREG(dispc, plane, name, i) \
seq_printf(s, "%s_%d(%s)%*s %08x\n", #name, i, p_names[plane], \
(int)(46 - strlen(#name) - strlen(p_names[plane])), " ", \
- dispc_read_reg(DISPC_REG(plane, name, i)))
+ dispc_read_reg(dispc, DISPC_REG(plane, name, i)))
/* Video pipeline coefficient registers */
/* start from OMAP_DSS_VIDEO1 */
- for (i = 1; i < dispc_get_num_ovls(); i++) {
+ for (i = 1; i < dispc_get_num_ovls(dispc); i++) {
for (j = 0; j < 8; j++)
- DUMPREG(i, DISPC_OVL_FIR_COEF_H, j);
+ DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_H, j);
for (j = 0; j < 8; j++)
- DUMPREG(i, DISPC_OVL_FIR_COEF_HV, j);
+ DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_HV, j);
for (j = 0; j < 5; j++)
- DUMPREG(i, DISPC_OVL_CONV_COEF, j);
+ DUMPREG(dispc, i, DISPC_OVL_CONV_COEF, j);
- if (dispc_has_feature(FEAT_FIR_COEF_V)) {
+ if (dispc_has_feature(dispc, FEAT_FIR_COEF_V)) {
for (j = 0; j < 8; j++)
- DUMPREG(i, DISPC_OVL_FIR_COEF_V, j);
+ DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_V, j);
}
- if (dispc_has_feature(FEAT_HANDLE_UV_SEPARATE)) {
+ if (dispc_has_feature(dispc, FEAT_HANDLE_UV_SEPARATE)) {
for (j = 0; j < 8; j++)
- DUMPREG(i, DISPC_OVL_FIR_COEF_H2, j);
+ DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_H2, j);
for (j = 0; j < 8; j++)
- DUMPREG(i, DISPC_OVL_FIR_COEF_HV2, j);
+ DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_HV2, j);
for (j = 0; j < 8; j++)
- DUMPREG(i, DISPC_OVL_FIR_COEF_V2, j);
+ DUMPREG(dispc, i, DISPC_OVL_FIR_COEF_V2, j);
}
}
- dispc_runtime_put();
+ dispc_runtime_put(dispc);
#undef DISPC_REG
#undef DUMPREG
+
+ return 0;
}
/* calculate clock rates using dividers in cinfo */
-int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
- struct dispc_clock_info *cinfo)
+int dispc_calc_clock_rates(struct dispc_device *dispc,
+ unsigned long dispc_fclk_rate,
+ struct dispc_clock_info *cinfo)
{
if (cinfo->lck_div > 255 || cinfo->lck_div == 0)
return -EINVAL;
@@ -3478,16 +3683,16 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
return 0;
}
-bool dispc_div_calc(unsigned long dispc_freq,
- unsigned long pck_min, unsigned long pck_max,
- dispc_div_calc_func func, void *data)
+bool dispc_div_calc(struct dispc_device *dispc, unsigned long dispc_freq,
+ unsigned long pck_min, unsigned long pck_max,
+ dispc_div_calc_func func, void *data)
{
int lckd, lckd_start, lckd_stop;
int pckd, pckd_start, pckd_stop;
unsigned long pck, lck;
unsigned long lck_max;
unsigned long pckd_hw_min, pckd_hw_max;
- unsigned min_fck_per_pck;
+ unsigned int min_fck_per_pck;
unsigned long fck;
#ifdef CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK
@@ -3496,10 +3701,10 @@ bool dispc_div_calc(unsigned long dispc_freq,
min_fck_per_pck = 0;
#endif
- pckd_hw_min = dispc.feat->min_pcd;
+ pckd_hw_min = dispc->feat->min_pcd;
pckd_hw_max = 255;
- lck_max = dss_get_max_fck_rate();
+ lck_max = dss_get_max_fck_rate(dispc->dss);
pck_min = pck_min ? pck_min : 1;
pck_max = pck_max ? pck_max : ULONG_MAX;
@@ -3522,8 +3727,8 @@ bool dispc_div_calc(unsigned long dispc_freq,
* also. Thus we need to use the calculated lck. For
* OMAP4+ the DISPC fclk is a separate clock.
*/
- if (dispc_has_feature(FEAT_CORE_CLK_DIV))
- fck = dispc_core_clk_rate();
+ if (dispc_has_feature(dispc, FEAT_CORE_CLK_DIV))
+ fck = dispc_core_clk_rate(dispc);
else
fck = lck;
@@ -3538,24 +3743,27 @@ bool dispc_div_calc(unsigned long dispc_freq,
return false;
}
-void dispc_mgr_set_clock_div(enum omap_channel channel,
- const struct dispc_clock_info *cinfo)
+void dispc_mgr_set_clock_div(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct dispc_clock_info *cinfo)
{
DSSDBG("lck = %lu (%u)\n", cinfo->lck, cinfo->lck_div);
DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div);
- dispc_mgr_set_lcd_divisor(channel, cinfo->lck_div, cinfo->pck_div);
+ dispc_mgr_set_lcd_divisor(dispc, channel, cinfo->lck_div,
+ cinfo->pck_div);
}
-int dispc_mgr_get_clock_div(enum omap_channel channel,
- struct dispc_clock_info *cinfo)
+int dispc_mgr_get_clock_div(struct dispc_device *dispc,
+ enum omap_channel channel,
+ struct dispc_clock_info *cinfo)
{
unsigned long fck;
- fck = dispc_fclk_rate();
+ fck = dispc_fclk_rate(dispc);
- cinfo->lck_div = REG_GET(DISPC_DIVISORo(channel), 23, 16);
- cinfo->pck_div = REG_GET(DISPC_DIVISORo(channel), 7, 0);
+ cinfo->lck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 23, 16);
+ cinfo->pck_div = REG_GET(dispc, DISPC_DIVISORo(channel), 7, 0);
cinfo->lck = fck / cinfo->lck_div;
cinfo->pck = cinfo->lck / cinfo->pck_div;
@@ -3563,53 +3771,56 @@ int dispc_mgr_get_clock_div(enum omap_channel channel,
return 0;
}
-static u32 dispc_read_irqstatus(void)
+static u32 dispc_read_irqstatus(struct dispc_device *dispc)
{
- return dispc_read_reg(DISPC_IRQSTATUS);
+ return dispc_read_reg(dispc, DISPC_IRQSTATUS);
}
-static void dispc_clear_irqstatus(u32 mask)
+static void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask)
{
- dispc_write_reg(DISPC_IRQSTATUS, mask);
+ dispc_write_reg(dispc, DISPC_IRQSTATUS, mask);
}
-static void dispc_write_irqenable(u32 mask)
+static void dispc_write_irqenable(struct dispc_device *dispc, u32 mask)
{
- u32 old_mask = dispc_read_reg(DISPC_IRQENABLE);
+ u32 old_mask = dispc_read_reg(dispc, DISPC_IRQENABLE);
/* clear the irqstatus for newly enabled irqs */
- dispc_clear_irqstatus((mask ^ old_mask) & mask);
+ dispc_clear_irqstatus(dispc, (mask ^ old_mask) & mask);
- dispc_write_reg(DISPC_IRQENABLE, mask);
+ dispc_write_reg(dispc, DISPC_IRQENABLE, mask);
/* flush posted write */
- dispc_read_reg(DISPC_IRQENABLE);
+ dispc_read_reg(dispc, DISPC_IRQENABLE);
}
-void dispc_enable_sidle(void)
+void dispc_enable_sidle(struct dispc_device *dispc)
{
- REG_FLD_MOD(DISPC_SYSCONFIG, 2, 4, 3); /* SIDLEMODE: smart idle */
+ /* SIDLEMODE: smart idle */
+ REG_FLD_MOD(dispc, DISPC_SYSCONFIG, 2, 4, 3);
}
-void dispc_disable_sidle(void)
+void dispc_disable_sidle(struct dispc_device *dispc)
{
- REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */
+ REG_FLD_MOD(dispc, DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */
}
-static u32 dispc_mgr_gamma_size(enum omap_channel channel)
+static u32 dispc_mgr_gamma_size(struct dispc_device *dispc,
+ enum omap_channel channel)
{
const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
- if (!dispc.feat->has_gamma_table)
+ if (!dispc->feat->has_gamma_table)
return 0;
return gdesc->len;
}
-static void dispc_mgr_write_gamma_table(enum omap_channel channel)
+static void dispc_mgr_write_gamma_table(struct dispc_device *dispc,
+ enum omap_channel channel)
{
const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
- u32 *table = dispc.gamma_table[channel];
+ u32 *table = dispc->gamma_table[channel];
unsigned int i;
DSSDBG("%s: channel %d\n", __func__, channel);
@@ -3622,26 +3833,26 @@ static void dispc_mgr_write_gamma_table(enum omap_channel channel)
else if (i == 0)
v |= 1 << 31;
- dispc_write_reg(gdesc->reg, v);
+ dispc_write_reg(dispc, gdesc->reg, v);
}
}
-static void dispc_restore_gamma_tables(void)
+static void dispc_restore_gamma_tables(struct dispc_device *dispc)
{
DSSDBG("%s()\n", __func__);
- if (!dispc.feat->has_gamma_table)
+ if (!dispc->feat->has_gamma_table)
return;
- dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD);
+ dispc_mgr_write_gamma_table(dispc, OMAP_DSS_CHANNEL_LCD);
- dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_DIGIT);
+ dispc_mgr_write_gamma_table(dispc, OMAP_DSS_CHANNEL_DIGIT);
- if (dispc_has_feature(FEAT_MGR_LCD2))
- dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD2);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD2))
+ dispc_mgr_write_gamma_table(dispc, OMAP_DSS_CHANNEL_LCD2);
- if (dispc_has_feature(FEAT_MGR_LCD3))
- dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD3);
+ if (dispc_has_feature(dispc, FEAT_MGR_LCD3))
+ dispc_mgr_write_gamma_table(dispc, OMAP_DSS_CHANNEL_LCD3);
}
static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = {
@@ -3649,18 +3860,19 @@ static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = {
{ .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
};
-static void dispc_mgr_set_gamma(enum omap_channel channel,
- const struct drm_color_lut *lut,
- unsigned int length)
+static void dispc_mgr_set_gamma(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct drm_color_lut *lut,
+ unsigned int length)
{
const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
- u32 *table = dispc.gamma_table[channel];
+ u32 *table = dispc->gamma_table[channel];
uint i;
DSSDBG("%s: channel %d, lut len %u, hw len %u\n", __func__,
channel, length, gdesc->len);
- if (!dispc.feat->has_gamma_table)
+ if (!dispc->feat->has_gamma_table)
return;
if (lut == NULL || length < 2) {
@@ -3692,82 +3904,83 @@ static void dispc_mgr_set_gamma(enum omap_channel channel,
}
}
- if (dispc.is_enabled)
- dispc_mgr_write_gamma_table(channel);
+ if (dispc->is_enabled)
+ dispc_mgr_write_gamma_table(dispc, channel);
}
-static int dispc_init_gamma_tables(void)
+static int dispc_init_gamma_tables(struct dispc_device *dispc)
{
int channel;
- if (!dispc.feat->has_gamma_table)
+ if (!dispc->feat->has_gamma_table)
return 0;
- for (channel = 0; channel < ARRAY_SIZE(dispc.gamma_table); channel++) {
+ for (channel = 0; channel < ARRAY_SIZE(dispc->gamma_table); channel++) {
const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
u32 *gt;
if (channel == OMAP_DSS_CHANNEL_LCD2 &&
- !dispc_has_feature(FEAT_MGR_LCD2))
+ !dispc_has_feature(dispc, FEAT_MGR_LCD2))
continue;
if (channel == OMAP_DSS_CHANNEL_LCD3 &&
- !dispc_has_feature(FEAT_MGR_LCD3))
+ !dispc_has_feature(dispc, FEAT_MGR_LCD3))
continue;
- gt = devm_kmalloc_array(&dispc.pdev->dev, gdesc->len,
- sizeof(u32), GFP_KERNEL);
+ gt = devm_kmalloc_array(&dispc->pdev->dev, gdesc->len,
+ sizeof(u32), GFP_KERNEL);
if (!gt)
return -ENOMEM;
- dispc.gamma_table[channel] = gt;
+ dispc->gamma_table[channel] = gt;
- dispc_mgr_set_gamma(channel, NULL, 0);
+ dispc_mgr_set_gamma(dispc, channel, NULL, 0);
}
return 0;
}
-static void _omap_dispc_initial_config(void)
+static void _omap_dispc_initial_config(struct dispc_device *dispc)
{
u32 l;
/* Exclusively enable DISPC_CORE_CLK and set divider to 1 */
- if (dispc_has_feature(FEAT_CORE_CLK_DIV)) {
- l = dispc_read_reg(DISPC_DIVISOR);
+ if (dispc_has_feature(dispc, FEAT_CORE_CLK_DIV)) {
+ l = dispc_read_reg(dispc, DISPC_DIVISOR);
/* Use DISPC_DIVISOR.LCD, instead of DISPC_DIVISOR1.LCD */
l = FLD_MOD(l, 1, 0, 0);
l = FLD_MOD(l, 1, 23, 16);
- dispc_write_reg(DISPC_DIVISOR, l);
+ dispc_write_reg(dispc, DISPC_DIVISOR, l);
- dispc.core_clk_rate = dispc_fclk_rate();
+ dispc->core_clk_rate = dispc_fclk_rate(dispc);
}
/* Use gamma table mode, instead of palette mode */
- if (dispc.feat->has_gamma_table)
- REG_FLD_MOD(DISPC_CONFIG, 1, 3, 3);
+ if (dispc->feat->has_gamma_table)
+ REG_FLD_MOD(dispc, DISPC_CONFIG, 1, 3, 3);
/* For older DSS versions (FEAT_FUNCGATED) this enables
* func-clock auto-gating. For newer versions
- * (dispc.feat->has_gamma_table) this enables tv-out gamma tables.
+ * (dispc->feat->has_gamma_table) this enables tv-out gamma tables.
*/
- if (dispc_has_feature(FEAT_FUNCGATED) || dispc.feat->has_gamma_table)
- REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
+ if (dispc_has_feature(dispc, FEAT_FUNCGATED) ||
+ dispc->feat->has_gamma_table)
+ REG_FLD_MOD(dispc, DISPC_CONFIG, 1, 9, 9);
- dispc_setup_color_conv_coef();
+ dispc_setup_color_conv_coef(dispc);
- dispc_set_loadmode(OMAP_DSS_LOAD_FRAME_ONLY);
+ dispc_set_loadmode(dispc, OMAP_DSS_LOAD_FRAME_ONLY);
- dispc_init_fifos();
+ dispc_init_fifos(dispc);
- dispc_configure_burst_sizes();
+ dispc_configure_burst_sizes(dispc);
- dispc_ovl_enable_zorder_planes();
+ dispc_ovl_enable_zorder_planes(dispc);
- if (dispc.feat->mstandby_workaround)
- REG_FLD_MOD(DISPC_MSTANDBY_CTRL, 1, 0, 0);
+ if (dispc->feat->mstandby_workaround)
+ REG_FLD_MOD(dispc, DISPC_MSTANDBY_CTRL, 1, 0, 0);
- if (dispc_has_feature(FEAT_MFLAG))
- dispc_init_mflag();
+ if (dispc_has_feature(dispc, FEAT_MFLAG))
+ dispc_init_mflag(dispc);
}
static const enum dispc_feature_id omap2_dispc_features_list[] = {
@@ -4286,49 +4499,52 @@ static const struct dispc_features omap54xx_dispc_feats = {
static irqreturn_t dispc_irq_handler(int irq, void *arg)
{
- if (!dispc.is_enabled)
+ struct dispc_device *dispc = arg;
+
+ if (!dispc->is_enabled)
return IRQ_NONE;
- return dispc.user_handler(irq, dispc.user_data);
+ return dispc->user_handler(irq, dispc->user_data);
}
-static int dispc_request_irq(irq_handler_t handler, void *dev_id)
+static int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler,
+ void *dev_id)
{
int r;
- if (dispc.user_handler != NULL)
+ if (dispc->user_handler != NULL)
return -EBUSY;
- dispc.user_handler = handler;
- dispc.user_data = dev_id;
+ dispc->user_handler = handler;
+ dispc->user_data = dev_id;
/* ensure the dispc_irq_handler sees the values above */
smp_wmb();
- r = devm_request_irq(&dispc.pdev->dev, dispc.irq, dispc_irq_handler,
- IRQF_SHARED, "OMAP DISPC", &dispc);
+ r = devm_request_irq(&dispc->pdev->dev, dispc->irq, dispc_irq_handler,
+ IRQF_SHARED, "OMAP DISPC", dispc);
if (r) {
- dispc.user_handler = NULL;
- dispc.user_data = NULL;
+ dispc->user_handler = NULL;
+ dispc->user_data = NULL;
}
return r;
}
-static void dispc_free_irq(void *dev_id)
+static void dispc_free_irq(struct dispc_device *dispc, void *dev_id)
{
- devm_free_irq(&dispc.pdev->dev, dispc.irq, &dispc);
+ devm_free_irq(&dispc->pdev->dev, dispc->irq, dispc);
- dispc.user_handler = NULL;
- dispc.user_data = NULL;
+ dispc->user_handler = NULL;
+ dispc->user_data = NULL;
}
-static u32 dispc_get_memory_bandwidth_limit(void)
+static u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc)
{
u32 limit = 0;
/* Optional maximum memory bandwidth */
- of_property_read_u32(dispc.pdev->dev.of_node, "max-memory-bandwidth",
+ of_property_read_u32(dispc->pdev->dev.of_node, "max-memory-bandwidth",
&limit);
return limit;
@@ -4405,18 +4621,19 @@ static struct i734_buf {
void *vaddr;
} i734_buf;
-static int dispc_errata_i734_wa_init(void)
+static int dispc_errata_i734_wa_init(struct dispc_device *dispc)
{
- if (!dispc.feat->has_gamma_i734_bug)
+ if (!dispc->feat->has_gamma_i734_bug)
return 0;
i734_buf.size = i734.ovli.width * i734.ovli.height *
color_mode_to_bpp(i734.ovli.fourcc) / 8;
- i734_buf.vaddr = dma_alloc_writecombine(&dispc.pdev->dev, i734_buf.size,
- &i734_buf.paddr, GFP_KERNEL);
+ i734_buf.vaddr = dma_alloc_writecombine(&dispc->pdev->dev,
+ i734_buf.size, &i734_buf.paddr,
+ GFP_KERNEL);
if (!i734_buf.vaddr) {
- dev_err(&dispc.pdev->dev, "%s: dma_alloc_writecombine failed",
+ dev_err(&dispc->pdev->dev, "%s: dma_alloc_writecombine failed",
__func__);
return -ENOMEM;
}
@@ -4424,72 +4641,73 @@ static int dispc_errata_i734_wa_init(void)
return 0;
}
-static void dispc_errata_i734_wa_fini(void)
+static void dispc_errata_i734_wa_fini(struct dispc_device *dispc)
{
- if (!dispc.feat->has_gamma_i734_bug)
+ if (!dispc->feat->has_gamma_i734_bug)
return;
- dma_free_writecombine(&dispc.pdev->dev, i734_buf.size, i734_buf.vaddr,
+ dma_free_writecombine(&dispc->pdev->dev, i734_buf.size, i734_buf.vaddr,
i734_buf.paddr);
}
-static void dispc_errata_i734_wa(void)
+static void dispc_errata_i734_wa(struct dispc_device *dispc)
{
- u32 framedone_irq = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_LCD);
+ u32 framedone_irq = dispc_mgr_get_framedone_irq(dispc,
+ OMAP_DSS_CHANNEL_LCD);
struct omap_overlay_info ovli;
struct dss_lcd_mgr_config lcd_conf;
u32 gatestate;
unsigned int count;
- if (!dispc.feat->has_gamma_i734_bug)
+ if (!dispc->feat->has_gamma_i734_bug)
return;
- gatestate = REG_GET(DISPC_CONFIG, 8, 4);
+ gatestate = REG_GET(dispc, DISPC_CONFIG, 8, 4);
ovli = i734.ovli;
ovli.paddr = i734_buf.paddr;
lcd_conf = i734.lcd_conf;
/* Gate all LCD1 outputs */
- REG_FLD_MOD(DISPC_CONFIG, 0x1f, 8, 4);
+ REG_FLD_MOD(dispc, DISPC_CONFIG, 0x1f, 8, 4);
/* Setup and enable GFX plane */
- dispc_ovl_setup(OMAP_DSS_GFX, &ovli, &i734.vm, false,
- OMAP_DSS_CHANNEL_LCD);
- dispc_ovl_enable(OMAP_DSS_GFX, true);
+ dispc_ovl_setup(dispc, OMAP_DSS_GFX, &ovli, &i734.vm, false,
+ OMAP_DSS_CHANNEL_LCD);
+ dispc_ovl_enable(dispc, OMAP_DSS_GFX, true);
/* Set up and enable display manager for LCD1 */
- dispc_mgr_setup(OMAP_DSS_CHANNEL_LCD, &i734.mgri);
- dispc_calc_clock_rates(dss_get_dispc_clk_rate(),
+ dispc_mgr_setup(dispc, OMAP_DSS_CHANNEL_LCD, &i734.mgri);
+ dispc_calc_clock_rates(dispc, dss_get_dispc_clk_rate(dispc->dss),
&lcd_conf.clock_info);
- dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf);
- dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.vm);
+ dispc_mgr_set_lcd_config(dispc, OMAP_DSS_CHANNEL_LCD, &lcd_conf);
+ dispc_mgr_set_timings(dispc, OMAP_DSS_CHANNEL_LCD, &i734.vm);
- dispc_clear_irqstatus(framedone_irq);
+ dispc_clear_irqstatus(dispc, framedone_irq);
/* Enable and shut the channel to produce just one frame */
- dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, true);
- dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, false);
+ dispc_mgr_enable(dispc, OMAP_DSS_CHANNEL_LCD, true);
+ dispc_mgr_enable(dispc, OMAP_DSS_CHANNEL_LCD, false);
/* Busy wait for framedone. We can't fiddle with irq handlers
* in PM resume. Typically the loop runs less than 5 times and
* waits less than a micro second.
*/
count = 0;
- while (!(dispc_read_irqstatus() & framedone_irq)) {
+ while (!(dispc_read_irqstatus(dispc) & framedone_irq)) {
if (count++ > 10000) {
- dev_err(&dispc.pdev->dev, "%s: framedone timeout\n",
+ dev_err(&dispc->pdev->dev, "%s: framedone timeout\n",
__func__);
break;
}
}
- dispc_ovl_enable(OMAP_DSS_GFX, false);
+ dispc_ovl_enable(dispc, OMAP_DSS_GFX, false);
/* Clear all irq bits before continuing */
- dispc_clear_irqstatus(0xffffffff);
+ dispc_clear_irqstatus(dispc, 0xffffffff);
/* Restore the original state to LCD1 output gates */
- REG_FLD_MOD(DISPC_CONFIG, gatestate, 8, 4);
+ REG_FLD_MOD(dispc, DISPC_CONFIG, gatestate, 8, 4);
}
static const struct dispc_ops dispc_ops = {
@@ -4525,6 +4743,12 @@ static const struct dispc_ops dispc_ops = {
.ovl_enable = dispc_ovl_enable,
.ovl_setup = dispc_ovl_setup,
.ovl_get_color_modes = dispc_ovl_get_color_modes,
+
+ .wb_get_framedone_irq = dispc_wb_get_framedone_irq,
+ .wb_setup = dispc_wb_setup,
+ .has_writeback = dispc_has_writeback,
+ .wb_go_busy = dispc_wb_go_busy,
+ .wb_go = dispc_wb_go,
};
/* DISPC HW IP initialisation */
@@ -4550,14 +4774,22 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
const struct soc_device_attribute *soc;
+ struct dss_device *dss = dss_get_device(master);
+ struct dispc_device *dispc;
u32 rev;
int r = 0;
struct resource *dispc_mem;
struct device_node *np = pdev->dev.of_node;
- dispc.pdev = pdev;
+ dispc = kzalloc(sizeof(*dispc), GFP_KERNEL);
+ if (!dispc)
+ return -ENOMEM;
+
+ dispc->pdev = pdev;
+ platform_set_drvdata(pdev, dispc);
+ dispc->dss = dss;
- spin_lock_init(&dispc.control_lock);
+ spin_lock_init(&dispc->control_lock);
/*
* The OMAP3-based models can't be told apart using the compatible
@@ -4565,76 +4797,92 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
*/
soc = soc_device_match(dispc_soc_devices);
if (soc)
- dispc.feat = soc->data;
+ dispc->feat = soc->data;
else
- dispc.feat = of_match_device(dispc_of_match, &pdev->dev)->data;
+ dispc->feat = of_match_device(dispc_of_match, &pdev->dev)->data;
- r = dispc_errata_i734_wa_init();
+ r = dispc_errata_i734_wa_init(dispc);
if (r)
- return r;
+ goto err_free;
- dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0);
- dispc.base = devm_ioremap_resource(&pdev->dev, dispc_mem);
- if (IS_ERR(dispc.base))
- return PTR_ERR(dispc.base);
+ dispc_mem = platform_get_resource(dispc->pdev, IORESOURCE_MEM, 0);
+ dispc->base = devm_ioremap_resource(&pdev->dev, dispc_mem);
+ if (IS_ERR(dispc->base)) {
+ r = PTR_ERR(dispc->base);
+ goto err_free;
+ }
- dispc.irq = platform_get_irq(dispc.pdev, 0);
- if (dispc.irq < 0) {
+ dispc->irq = platform_get_irq(dispc->pdev, 0);
+ if (dispc->irq < 0) {
DSSERR("platform_get_irq failed\n");
- return -ENODEV;
+ r = -ENODEV;
+ goto err_free;
}
if (np && of_property_read_bool(np, "syscon-pol")) {
- dispc.syscon_pol = syscon_regmap_lookup_by_phandle(np, "syscon-pol");
- if (IS_ERR(dispc.syscon_pol)) {
+ dispc->syscon_pol = syscon_regmap_lookup_by_phandle(np, "syscon-pol");
+ if (IS_ERR(dispc->syscon_pol)) {
dev_err(&pdev->dev, "failed to get syscon-pol regmap\n");
- return PTR_ERR(dispc.syscon_pol);
+ r = PTR_ERR(dispc->syscon_pol);
+ goto err_free;
}
if (of_property_read_u32_index(np, "syscon-pol", 1,
- &dispc.syscon_pol_offset)) {
+ &dispc->syscon_pol_offset)) {
dev_err(&pdev->dev, "failed to get syscon-pol offset\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto err_free;
}
}
- r = dispc_init_gamma_tables();
+ r = dispc_init_gamma_tables(dispc);
if (r)
- return r;
+ goto err_free;
pm_runtime_enable(&pdev->dev);
- r = dispc_runtime_get();
+ r = dispc_runtime_get(dispc);
if (r)
goto err_runtime_get;
- _omap_dispc_initial_config();
+ _omap_dispc_initial_config(dispc);
- rev = dispc_read_reg(DISPC_REVISION);
+ rev = dispc_read_reg(dispc, DISPC_REVISION);
dev_dbg(&pdev->dev, "OMAP DISPC rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
- dispc_runtime_put();
+ dispc_runtime_put(dispc);
- dispc_set_ops(&dispc_ops);
+ dss->dispc = dispc;
+ dss->dispc_ops = &dispc_ops;
- dss_debugfs_create_file("dispc", dispc_dump_regs);
+ dispc->debugfs = dss_debugfs_create_file(dss, "dispc", dispc_dump_regs,
+ dispc);
return 0;
err_runtime_get:
pm_runtime_disable(&pdev->dev);
+err_free:
+ kfree(dispc);
return r;
}
-static void dispc_unbind(struct device *dev, struct device *master,
- void *data)
+static void dispc_unbind(struct device *dev, struct device *master, void *data)
{
- dispc_set_ops(NULL);
+ struct dispc_device *dispc = dev_get_drvdata(dev);
+ struct dss_device *dss = dispc->dss;
+
+ dss_debugfs_remove_file(dispc->debugfs);
+
+ dss->dispc = NULL;
+ dss->dispc_ops = NULL;
pm_runtime_disable(dev);
- dispc_errata_i734_wa_fini();
+ dispc_errata_i734_wa_fini(dispc);
+
+ kfree(dispc);
}
static const struct component_ops dispc_component_ops = {
@@ -4655,36 +4903,40 @@ static int dispc_remove(struct platform_device *pdev)
static int dispc_runtime_suspend(struct device *dev)
{
- dispc.is_enabled = false;
+ struct dispc_device *dispc = dev_get_drvdata(dev);
+
+ dispc->is_enabled = false;
/* ensure the dispc_irq_handler sees the is_enabled value */
smp_wmb();
/* wait for current handler to finish before turning the DISPC off */
- synchronize_irq(dispc.irq);
+ synchronize_irq(dispc->irq);
- dispc_save_context();
+ dispc_save_context(dispc);
return 0;
}
static int dispc_runtime_resume(struct device *dev)
{
+ struct dispc_device *dispc = dev_get_drvdata(dev);
+
/*
* The reset value for load mode is 0 (OMAP_DSS_LOAD_CLUT_AND_FRAME)
* but we always initialize it to 2 (OMAP_DSS_LOAD_FRAME_ONLY) in
* _omap_dispc_initial_config(). We can thus use it to detect if
* we have lost register context.
*/
- if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) {
- _omap_dispc_initial_config();
+ if (REG_GET(dispc, DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) {
+ _omap_dispc_initial_config(dispc);
- dispc_errata_i734_wa();
+ dispc_errata_i734_wa(dispc);
- dispc_restore_context();
+ dispc_restore_context(dispc);
- dispc_restore_gamma_tables();
+ dispc_restore_gamma_tables(dispc);
}
- dispc.is_enabled = true;
+ dispc->is_enabled = true;
/* ensure the dispc_irq_handler sees the is_enabled value */
smp_wmb();
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 0c9480ba85c0..424143128cd4 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -28,12 +28,11 @@
#include "omapdss.h"
-void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static void omapdss_default_get_timings(struct omap_dss_device *dssdev,
+ struct videomode *vm)
{
*vm = dssdev->panel.vm;
}
-EXPORT_SYMBOL(omapdss_default_get_timings);
static LIST_HEAD(panel_list);
static DEFINE_MUTEX(panel_list_mutex);
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index ea44137ed08c..fb1c27f69e3a 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -38,6 +38,7 @@
struct dpi_data {
struct platform_device *pdev;
enum dss_model dss_model;
+ struct dss_device *dss;
struct regulator *vdds_dsi_reg;
enum dss_clk_source clk_src;
@@ -57,7 +58,8 @@ static struct dpi_data *dpi_get_data_from_dssdev(struct omap_dss_device *dssdev)
return container_of(dssdev, struct dpi_data, output);
}
-static enum dss_clk_source dpi_get_clk_src_dra7xx(enum omap_channel channel)
+static enum dss_clk_source dpi_get_clk_src_dra7xx(struct dpi_data *dpi,
+ enum omap_channel channel)
{
/*
* Possible clock sources:
@@ -69,23 +71,23 @@ static enum dss_clk_source dpi_get_clk_src_dra7xx(enum omap_channel channel)
switch (channel) {
case OMAP_DSS_CHANNEL_LCD:
{
- if (dss_pll_find_by_src(DSS_CLK_SRC_PLL1_1))
+ if (dss_pll_find_by_src(dpi->dss, DSS_CLK_SRC_PLL1_1))
return DSS_CLK_SRC_PLL1_1;
break;
}
case OMAP_DSS_CHANNEL_LCD2:
{
- if (dss_pll_find_by_src(DSS_CLK_SRC_PLL1_3))
+ if (dss_pll_find_by_src(dpi->dss, DSS_CLK_SRC_PLL1_3))
return DSS_CLK_SRC_PLL1_3;
- if (dss_pll_find_by_src(DSS_CLK_SRC_PLL2_3))
+ if (dss_pll_find_by_src(dpi->dss, DSS_CLK_SRC_PLL2_3))
return DSS_CLK_SRC_PLL2_3;
break;
}
case OMAP_DSS_CHANNEL_LCD3:
{
- if (dss_pll_find_by_src(DSS_CLK_SRC_PLL2_1))
+ if (dss_pll_find_by_src(dpi->dss, DSS_CLK_SRC_PLL2_1))
return DSS_CLK_SRC_PLL2_1;
- if (dss_pll_find_by_src(DSS_CLK_SRC_PLL1_3))
+ if (dss_pll_find_by_src(dpi->dss, DSS_CLK_SRC_PLL1_3))
return DSS_CLK_SRC_PLL1_3;
break;
}
@@ -132,7 +134,7 @@ static enum dss_clk_source dpi_get_clk_src(struct dpi_data *dpi)
}
case DSS_MODEL_DRA7:
- return dpi_get_clk_src_dra7xx(channel);
+ return dpi_get_clk_src_dra7xx(dpi, channel);
default:
return DSS_CLK_SRC_FCK;
@@ -141,7 +143,7 @@ static enum dss_clk_source dpi_get_clk_src(struct dpi_data *dpi)
struct dpi_clk_calc_ctx {
struct dss_pll *pll;
- unsigned clkout_idx;
+ unsigned int clkout_idx;
/* inputs */
@@ -189,8 +191,9 @@ static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc;
ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc;
- return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
- dpi_calc_dispc_cb, ctx);
+ return dispc_div_calc(ctx->pll->dss->dispc, dispc,
+ ctx->pck_min, ctx->pck_max,
+ dpi_calc_dispc_cb, ctx);
}
@@ -206,7 +209,7 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
ctx->pll_cinfo.clkdco = clkdco;
return dss_pll_hsdiv_calc_a(ctx->pll, clkdco,
- ctx->pck_min, dss_get_max_fck_rate(),
+ ctx->pck_min, dss_get_max_fck_rate(ctx->pll->dss),
dpi_calc_hsdiv_cb, ctx);
}
@@ -216,8 +219,9 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
ctx->fck = fck;
- return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max,
- dpi_calc_dispc_cb, ctx);
+ return dispc_div_calc(ctx->pll->dss->dispc, fck,
+ ctx->pck_min, ctx->pck_max,
+ dpi_calc_dispc_cb, ctx);
}
static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
@@ -255,7 +259,8 @@ static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
}
}
-static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
+static bool dpi_dss_clk_calc(struct dpi_data *dpi, unsigned long pck,
+ struct dpi_clk_calc_ctx *ctx)
{
int i;
@@ -276,7 +281,8 @@ static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
ctx->pck_min = 0;
ctx->pck_max = pck + 1000 * i * i * i;
- ok = dss_div_calc(pck, ctx->pck_min, dpi_calc_dss_cb, ctx);
+ ok = dss_div_calc(dpi->dss, pck, ctx->pck_min,
+ dpi_calc_dss_cb, ctx);
if (ok)
return ok;
}
@@ -302,7 +308,7 @@ static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
if (r)
return r;
- dss_select_lcd_clk_source(channel, dpi->clk_src);
+ dss_select_lcd_clk_source(dpi->dss, channel, dpi->clk_src);
dpi->mgr_config.clock_info = ctx.dispc_cinfo;
@@ -320,11 +326,11 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
int r;
bool ok;
- ok = dpi_dss_clk_calc(pck_req, &ctx);
+ ok = dpi_dss_clk_calc(dpi, pck_req, &ctx);
if (!ok)
return -EINVAL;
- r = dss_set_fck_rate(ctx.fck);
+ r = dss_set_fck_rate(dpi->dss, ctx.fck);
if (r)
return r;
@@ -339,8 +345,6 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
static int dpi_set_mode(struct dpi_data *dpi)
{
- struct omap_dss_device *out = &dpi->output;
- enum omap_channel channel = out->dispc_channel;
struct videomode *vm = &dpi->vm;
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
@@ -348,8 +352,8 @@ static int dpi_set_mode(struct dpi_data *dpi)
int r = 0;
if (dpi->pll)
- r = dpi_set_pll_clk(dpi, channel, vm->pixelclock, &fck,
- &lck_div, &pck_div);
+ r = dpi_set_pll_clk(dpi, dpi->output.dispc_channel,
+ vm->pixelclock, &fck, &lck_div, &pck_div);
else
r = dpi_set_dispc_clk(dpi, vm->pixelclock, &fck,
&lck_div, &pck_div);
@@ -365,16 +369,13 @@ static int dpi_set_mode(struct dpi_data *dpi)
vm->pixelclock = pck;
}
- dss_mgr_set_timings(channel, vm);
+ dss_mgr_set_timings(&dpi->output, vm);
return 0;
}
static void dpi_config_lcd_manager(struct dpi_data *dpi)
{
- struct omap_dss_device *out = &dpi->output;
- enum omap_channel channel = out->dispc_channel;
-
dpi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
dpi->mgr_config.stallmode = false;
@@ -384,14 +385,13 @@ static void dpi_config_lcd_manager(struct dpi_data *dpi)
dpi->mgr_config.lcden_sig_polarity = 0;
- dss_mgr_set_lcd_config(channel, &dpi->mgr_config);
+ dss_mgr_set_lcd_config(&dpi->output, &dpi->mgr_config);
}
static int dpi_display_enable(struct omap_dss_device *dssdev)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
struct omap_dss_device *out = &dpi->output;
- enum omap_channel channel = out->dispc_channel;
int r;
mutex_lock(&dpi->lock);
@@ -408,11 +408,11 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
goto err_reg_enable;
}
- r = dispc_runtime_get();
+ r = dispc_runtime_get(dpi->dss->dispc);
if (r)
goto err_get_dispc;
- r = dss_dpi_select_source(out->port_num, channel);
+ r = dss_dpi_select_source(dpi->dss, out->port_num, out->dispc_channel);
if (r)
goto err_src_sel;
@@ -430,7 +430,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
mdelay(2);
- r = dss_mgr_enable(channel);
+ r = dss_mgr_enable(&dpi->output);
if (r)
goto err_mgr_enable;
@@ -444,7 +444,7 @@ err_set_mode:
dss_pll_disable(dpi->pll);
err_pll_init:
err_src_sel:
- dispc_runtime_put();
+ dispc_runtime_put(dpi->dss->dispc);
err_get_dispc:
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
@@ -457,18 +457,18 @@ err_no_out_mgr:
static void dpi_display_disable(struct omap_dss_device *dssdev)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- enum omap_channel channel = dpi->output.dispc_channel;
mutex_lock(&dpi->lock);
- dss_mgr_disable(channel);
+ dss_mgr_disable(&dpi->output);
if (dpi->pll) {
- dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(dpi->dss, dpi->output.dispc_channel,
+ DSS_CLK_SRC_FCK);
dss_pll_disable(dpi->pll);
}
- dispc_runtime_put();
+ dispc_runtime_put(dpi->dss->dispc);
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
@@ -516,7 +516,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
if (vm->hactive % 8 != 0)
return -EINVAL;
- if (!dispc_mgr_timings_ok(channel, vm))
+ if (!dispc_mgr_timings_ok(dpi->dss->dispc, channel, vm))
return -EINVAL;
if (vm->pixelclock == 0)
@@ -529,7 +529,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
} else {
- ok = dpi_dss_clk_calc(vm->pixelclock, &ctx);
+ ok = dpi_dss_clk_calc(dpi, vm->pixelclock, &ctx);
if (!ok)
return -EINVAL;
@@ -602,7 +602,7 @@ static void dpi_init_pll(struct dpi_data *dpi)
dpi->clk_src = dpi_get_clk_src(dpi);
- pll = dss_pll_find_by_src(dpi->clk_src);
+ pll = dss_pll_find_by_src(dpi->dss, dpi->clk_src);
if (!pll)
return;
@@ -654,7 +654,6 @@ static int dpi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- enum omap_channel channel = dpi->output.dispc_channel;
int r;
r = dpi_init_regulator(dpi);
@@ -663,7 +662,7 @@ static int dpi_connect(struct omap_dss_device *dssdev,
dpi_init_pll(dpi);
- r = dss_mgr_connect(channel, dssdev);
+ r = dss_mgr_connect(&dpi->output, dssdev);
if (r)
return r;
@@ -671,7 +670,7 @@ static int dpi_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dst->name);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&dpi->output, dssdev);
return r;
}
@@ -682,7 +681,6 @@ static void dpi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- enum omap_channel channel = dpi->output.dispc_channel;
WARN_ON(dst != dssdev->dst);
@@ -691,7 +689,7 @@ static void dpi_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&dpi->output, dssdev);
}
static const struct omapdss_dpi_ops dpi_ops = {
@@ -748,8 +746,8 @@ static void dpi_uninit_output_port(struct device_node *port)
omapdss_unregister_output(out);
}
-int dpi_init_port(struct platform_device *pdev, struct device_node *port,
- enum dss_model dss_model)
+int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
+ struct device_node *port, enum dss_model dss_model)
{
struct dpi_data *dpi;
struct device_node *ep;
@@ -776,6 +774,7 @@ int dpi_init_port(struct platform_device *pdev, struct device_node *port,
dpi->pdev = pdev;
dpi->dss_model = dss_model;
+ dpi->dss = dss;
port->data = dpi;
mutex_init(&dpi->lock);
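
A minimal standalone sketch (not part of the patch) of the conversion pattern these hunks apply: helpers stop taking a generic device handle and re-deriving the driver-private data via dev_get_drvdata() on every call, and instead receive the private data pointer directly, resolved once at the API boundary. The names below (dsi_data_example, old_helper, new_helper) are hypothetical and only illustrate the shape of the change; they are not the kernel's API.

/* illustrative only -- plain C, compiles on its own */
#include <stdio.h>

struct device { void *drvdata; };            /* stand-in for the kernel's struct device */
struct dsi_data_example { int module_id; };  /* stand-in for the driver-private data */

/* old style: every helper re-derives the private data from the device handle */
static int old_helper(struct device *dev)
{
	struct dsi_data_example *dsi = dev->drvdata;
	return dsi->module_id;
}

/* new style: the helper takes the private data pointer directly */
static int new_helper(struct dsi_data_example *dsi)
{
	return dsi->module_id;
}

int main(void)
{
	struct dsi_data_example dsi = { .module_id = 1 };
	struct device dev = { .drvdata = &dsi };

	/* both return the same value; the new form just skips the lookup */
	printf("old=%d new=%d\n", old_helper(&dev), new_helper(&dsi));
	return 0;
}
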
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 80f1f3679a3c..d4a680629825 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -119,11 +119,11 @@ struct dsi_reg { u16 module; u16 idx; };
#define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 0x000C)
#define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010)
-#define REG_GET(dsidev, idx, start, end) \
- FLD_GET(dsi_read_reg(dsidev, idx), start, end)
+#define REG_GET(dsi, idx, start, end) \
+ FLD_GET(dsi_read_reg(dsi, idx), start, end)
-#define REG_FLD_MOD(dsidev, idx, val, start, end) \
- dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
+#define REG_FLD_MOD(dsi, idx, val, start, end) \
+ dsi_write_reg(dsi, idx, FLD_MOD(dsi_read_reg(dsi, idx), val, start, end))
/* Global interrupts */
#define DSI_IRQ_VC0 (1 << 0)
@@ -213,13 +213,12 @@ struct dsi_reg { u16 module; u16 idx; };
DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)
typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
+struct dsi_data;
-static int dsi_display_init_dispc(struct platform_device *dsidev,
- enum omap_channel channel);
-static void dsi_display_uninit_dispc(struct platform_device *dsidev,
- enum omap_channel channel);
+static int dsi_display_init_dispc(struct dsi_data *dsi);
+static void dsi_display_uninit_dispc(struct dsi_data *dsi);
-static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
+static int dsi_vc_send_null(struct dsi_data *dsi, int channel);
/* DSI PLL HSDIV indices */
#define HSDIV_DISPC 0
@@ -269,10 +268,10 @@ enum dsi_vc_source {
struct dsi_irq_stats {
unsigned long last_reset;
- unsigned irq_count;
- unsigned dsi_irqs[32];
- unsigned vc_irqs[4][32];
- unsigned cio_irqs[32];
+ unsigned int irq_count;
+ unsigned int dsi_irqs[32];
+ unsigned int vc_irqs[4][32];
+ unsigned int cio_irqs[32];
};
struct dsi_isr_tables {
@@ -282,7 +281,7 @@ struct dsi_isr_tables {
};
struct dsi_clk_calc_ctx {
- struct platform_device *dsidev;
+ struct dsi_data *dsi;
struct dss_pll *pll;
/* inputs */
@@ -329,7 +328,7 @@ struct dsi_of_data {
};
struct dsi_data {
- struct platform_device *pdev;
+ struct device *dev;
void __iomem *proto_base;
void __iomem *phy_base;
void __iomem *pll_base;
@@ -343,6 +342,7 @@ struct dsi_data {
struct clk *dss_clk;
struct regmap *syscon;
+ struct dss_device *dss;
struct dispc_clock_info user_dispc_cinfo;
struct dss_pll_clock_info user_dsi_cinfo;
@@ -373,7 +373,7 @@ struct dsi_data {
int update_channel;
#ifdef DSI_PERF_MEASURE
- unsigned update_bytes;
+ unsigned int update_bytes;
#endif
bool te_enabled;
@@ -400,19 +400,23 @@ struct dsi_data {
#endif
int debug_read;
int debug_write;
+ struct {
+ struct dss_debugfs_entry *irqs;
+ struct dss_debugfs_entry *regs;
+ } debugfs;
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
spinlock_t irq_stats_lock;
struct dsi_irq_stats irq_stats;
#endif
- unsigned num_lanes_supported;
- unsigned line_buffer_size;
+ unsigned int num_lanes_supported;
+ unsigned int line_buffer_size;
struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
- unsigned num_lanes_used;
+ unsigned int num_lanes_used;
- unsigned scp_clk_refcount;
+ unsigned int scp_clk_refcount;
struct dss_lcd_mgr_config mgr_config;
struct videomode vm;
@@ -424,7 +428,7 @@ struct dsi_data {
};
struct dsi_packet_sent_handler_data {
- struct platform_device *dsidev;
+ struct dsi_data *dsi;
struct completion *completion;
};
@@ -433,17 +437,12 @@ static bool dsi_perf;
module_param(dsi_perf, bool, 0644);
#endif
-static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
+static inline struct dsi_data *to_dsi_data(struct omap_dss_device *dssdev)
{
- return dev_get_drvdata(&dsidev->dev);
+ return dev_get_drvdata(dssdev->dev);
}
-static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
-{
- return to_platform_device(dssdev->dev);
-}
-
-static struct platform_device *dsi_get_dsidev_from_id(int module)
+static struct dsi_data *dsi_get_dsi_from_id(int module)
{
struct omap_dss_device *out;
enum omap_dss_output_id id;
@@ -461,13 +460,12 @@ static struct platform_device *dsi_get_dsidev_from_id(int module)
out = omap_dss_get_output(id);
- return out ? to_platform_device(out->dev) : NULL;
+ return out ? to_dsi_data(out) : NULL;
}
-static inline void dsi_write_reg(struct platform_device *dsidev,
- const struct dsi_reg idx, u32 val)
+static inline void dsi_write_reg(struct dsi_data *dsi,
+ const struct dsi_reg idx, u32 val)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
void __iomem *base;
switch(idx.module) {
@@ -480,10 +478,8 @@ static inline void dsi_write_reg(struct platform_device *dsidev,
__raw_writel(val, base + idx.idx);
}
-static inline u32 dsi_read_reg(struct platform_device *dsidev,
- const struct dsi_reg idx)
+static inline u32 dsi_read_reg(struct dsi_data *dsi, const struct dsi_reg idx)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
void __iomem *base;
switch(idx.module) {
@@ -498,24 +494,20 @@ static inline u32 dsi_read_reg(struct platform_device *dsidev,
static void dsi_bus_lock(struct omap_dss_device *dssdev)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
down(&dsi->bus_lock);
}
static void dsi_bus_unlock(struct omap_dss_device *dssdev)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
up(&dsi->bus_lock);
}
-static bool dsi_bus_is_locked(struct platform_device *dsidev)
+static bool dsi_bus_is_locked(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
return dsi->bus_lock.count == 0;
}
@@ -524,8 +516,9 @@ static void dsi_completion_handler(void *data, u32 mask)
complete((struct completion *)data);
}
-static inline int wait_for_bit_change(struct platform_device *dsidev,
- const struct dsi_reg idx, int bitnum, int value)
+static inline bool wait_for_bit_change(struct dsi_data *dsi,
+ const struct dsi_reg idx,
+ int bitnum, int value)
{
unsigned long timeout;
ktime_t wait;
@@ -534,22 +527,22 @@ static inline int wait_for_bit_change(struct platform_device *dsidev,
/* first busyloop to see if the bit changes right away */
t = 100;
while (t-- > 0) {
- if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
- return value;
+ if (REG_GET(dsi, idx, bitnum, bitnum) == value)
+ return true;
}
/* then loop for 500ms, sleeping for 1ms in between */
timeout = jiffies + msecs_to_jiffies(500);
while (time_before(jiffies, timeout)) {
- if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
- return value;
+ if (REG_GET(dsi, idx, bitnum, bitnum) == value)
+ return true;
wait = ns_to_ktime(1000 * 1000);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
}
- return !value;
+ return false;
}
static u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
@@ -569,21 +562,18 @@ static u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
}
#ifdef DSI_PERF_MEASURE
-static void dsi_perf_mark_setup(struct platform_device *dsidev)
+static void dsi_perf_mark_setup(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
dsi->perf_setup_time = ktime_get();
}
-static void dsi_perf_mark_start(struct platform_device *dsidev)
+static void dsi_perf_mark_start(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
dsi->perf_start_time = ktime_get();
}
-static void dsi_perf_show(struct platform_device *dsidev, const char *name)
+static void dsi_perf_show(struct dsi_data *dsi, const char *name)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
ktime_t t, setup_time, trans_time;
u32 total_bytes;
u32 setup_us, trans_us, total_us;
@@ -617,16 +607,15 @@ static void dsi_perf_show(struct platform_device *dsidev, const char *name)
total_bytes * 1000 / total_us);
}
#else
-static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
+static inline void dsi_perf_mark_setup(struct dsi_data *dsi)
{
}
-static inline void dsi_perf_mark_start(struct platform_device *dsidev)
+static inline void dsi_perf_mark_start(struct dsi_data *dsi)
{
}
-static inline void dsi_perf_show(struct platform_device *dsidev,
- const char *name)
+static inline void dsi_perf_show(struct dsi_data *dsi, const char *name)
{
}
#endif
@@ -723,10 +712,9 @@ static void print_irq_status_cio(u32 status)
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
- u32 *vcstatus, u32 ciostatus)
+static void dsi_collect_irq_stats(struct dsi_data *dsi, u32 irqstatus,
+ u32 *vcstatus, u32 ciostatus)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int i;
spin_lock(&dsi->irq_stats_lock);
@@ -742,15 +730,14 @@ static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
spin_unlock(&dsi->irq_stats_lock);
}
#else
-#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
+#define dsi_collect_irq_stats(dsi, irqstatus, vcstatus, ciostatus)
#endif
static int debug_irq;
-static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
- u32 *vcstatus, u32 ciostatus)
+static void dsi_handle_irq_errors(struct dsi_data *dsi, u32 irqstatus,
+ u32 *vcstatus, u32 ciostatus)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int i;
if (irqstatus & DSI_IRQ_ERROR_MASK) {
@@ -782,7 +769,7 @@ static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
}
static void dsi_call_isrs(struct dsi_isr_data *isr_array,
- unsigned isr_array_size, u32 irqstatus)
+ unsigned int isr_array_size, u32 irqstatus)
{
struct dsi_isr_data *isr_data;
int i;
@@ -819,20 +806,16 @@ static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
{
- struct platform_device *dsidev;
- struct dsi_data *dsi;
+ struct dsi_data *dsi = arg;
u32 irqstatus, vcstatus[4], ciostatus;
int i;
- dsidev = (struct platform_device *) arg;
- dsi = dsi_get_dsidrv_data(dsidev);
-
if (!dsi->is_enabled)
return IRQ_NONE;
spin_lock(&dsi->irq_lock);
- irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);
+ irqstatus = dsi_read_reg(dsi, DSI_IRQSTATUS);
/* IRQ is not for us */
if (!irqstatus) {
@@ -840,9 +823,9 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
return IRQ_NONE;
}
- dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
+ dsi_write_reg(dsi, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
/* flush posted write */
- dsi_read_reg(dsidev, DSI_IRQSTATUS);
+ dsi_read_reg(dsi, DSI_IRQSTATUS);
for (i = 0; i < 4; ++i) {
if ((irqstatus & (1 << i)) == 0) {
@@ -850,19 +833,19 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
continue;
}
- vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
+ vcstatus[i] = dsi_read_reg(dsi, DSI_VC_IRQSTATUS(i));
- dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
+ dsi_write_reg(dsi, DSI_VC_IRQSTATUS(i), vcstatus[i]);
/* flush posted write */
- dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
+ dsi_read_reg(dsi, DSI_VC_IRQSTATUS(i));
}
if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
- ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
+ ciostatus = dsi_read_reg(dsi, DSI_COMPLEXIO_IRQ_STATUS);
- dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
+ dsi_write_reg(dsi, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
/* flush posted write */
- dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
+ dsi_read_reg(dsi, DSI_COMPLEXIO_IRQ_STATUS);
} else {
ciostatus = 0;
}
@@ -881,19 +864,20 @@ static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);
- dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);
+ dsi_handle_irq_errors(dsi, irqstatus, vcstatus, ciostatus);
- dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);
+ dsi_collect_irq_stats(dsi, irqstatus, vcstatus, ciostatus);
return IRQ_HANDLED;
}
/* dsi->irq_lock has to be locked by the caller */
-static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
- struct dsi_isr_data *isr_array,
- unsigned isr_array_size, u32 default_mask,
- const struct dsi_reg enable_reg,
- const struct dsi_reg status_reg)
+static void _omap_dsi_configure_irqs(struct dsi_data *dsi,
+ struct dsi_isr_data *isr_array,
+ unsigned int isr_array_size,
+ u32 default_mask,
+ const struct dsi_reg enable_reg,
+ const struct dsi_reg status_reg)
{
struct dsi_isr_data *isr_data;
u32 mask;
@@ -911,54 +895,48 @@ static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
mask |= isr_data->mask;
}
- old_mask = dsi_read_reg(dsidev, enable_reg);
+ old_mask = dsi_read_reg(dsi, enable_reg);
/* clear the irqstatus for newly enabled irqs */
- dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
- dsi_write_reg(dsidev, enable_reg, mask);
+ dsi_write_reg(dsi, status_reg, (mask ^ old_mask) & mask);
+ dsi_write_reg(dsi, enable_reg, mask);
/* flush posted writes */
- dsi_read_reg(dsidev, enable_reg);
- dsi_read_reg(dsidev, status_reg);
+ dsi_read_reg(dsi, enable_reg);
+ dsi_read_reg(dsi, status_reg);
}
/* dsi->irq_lock has to be locked by the caller */
-static void _omap_dsi_set_irqs(struct platform_device *dsidev)
+static void _omap_dsi_set_irqs(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 mask = DSI_IRQ_ERROR_MASK;
#ifdef DSI_CATCH_MISSING_TE
mask |= DSI_IRQ_TE_TRIGGER;
#endif
- _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
+ _omap_dsi_configure_irqs(dsi, dsi->isr_tables.isr_table,
ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
DSI_IRQENABLE, DSI_IRQSTATUS);
}
/* dsi->irq_lock has to be locked by the caller */
-static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
+static void _omap_dsi_set_irqs_vc(struct dsi_data *dsi, int vc)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
- _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
+ _omap_dsi_configure_irqs(dsi, dsi->isr_tables.isr_table_vc[vc],
ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
DSI_VC_IRQ_ERROR_MASK,
DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
}
/* dsi->irq_lock has to be locked by the caller */
-static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
+static void _omap_dsi_set_irqs_cio(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
- _omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
+ _omap_dsi_configure_irqs(dsi, dsi->isr_tables.isr_table_cio,
ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
DSI_CIO_IRQ_ERROR_MASK,
DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
}
-static void _dsi_initialize_irq(struct platform_device *dsidev)
+static void _dsi_initialize_irq(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int vc;
@@ -966,16 +944,16 @@ static void _dsi_initialize_irq(struct platform_device *dsidev)
memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));
- _omap_dsi_set_irqs(dsidev);
+ _omap_dsi_set_irqs(dsi);
for (vc = 0; vc < 4; ++vc)
- _omap_dsi_set_irqs_vc(dsidev, vc);
- _omap_dsi_set_irqs_cio(dsidev);
+ _omap_dsi_set_irqs_vc(dsi, vc);
+ _omap_dsi_set_irqs_cio(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
}
static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
- struct dsi_isr_data *isr_array, unsigned isr_array_size)
+ struct dsi_isr_data *isr_array, unsigned int isr_array_size)
{
struct dsi_isr_data *isr_data;
int free_idx;
@@ -1009,7 +987,7 @@ static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
}
static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
- struct dsi_isr_data *isr_array, unsigned isr_array_size)
+ struct dsi_isr_data *isr_array, unsigned int isr_array_size)
{
struct dsi_isr_data *isr_data;
int i;
@@ -1030,10 +1008,9 @@ static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
return -EINVAL;
}
-static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
- void *arg, u32 mask)
+static int dsi_register_isr(struct dsi_data *dsi, omap_dsi_isr_t isr,
+ void *arg, u32 mask)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
@@ -1043,17 +1020,16 @@ static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
ARRAY_SIZE(dsi->isr_tables.isr_table));
if (r == 0)
- _omap_dsi_set_irqs(dsidev);
+ _omap_dsi_set_irqs(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
-static int dsi_unregister_isr(struct platform_device *dsidev,
- omap_dsi_isr_t isr, void *arg, u32 mask)
+static int dsi_unregister_isr(struct dsi_data *dsi, omap_dsi_isr_t isr,
+ void *arg, u32 mask)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
@@ -1063,17 +1039,16 @@ static int dsi_unregister_isr(struct platform_device *dsidev,
ARRAY_SIZE(dsi->isr_tables.isr_table));
if (r == 0)
- _omap_dsi_set_irqs(dsidev);
+ _omap_dsi_set_irqs(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
-static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
- omap_dsi_isr_t isr, void *arg, u32 mask)
+static int dsi_register_isr_vc(struct dsi_data *dsi, int channel,
+ omap_dsi_isr_t isr, void *arg, u32 mask)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
@@ -1084,17 +1059,16 @@ static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
if (r == 0)
- _omap_dsi_set_irqs_vc(dsidev, channel);
+ _omap_dsi_set_irqs_vc(dsi, channel);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
-static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
- omap_dsi_isr_t isr, void *arg, u32 mask)
+static int dsi_unregister_isr_vc(struct dsi_data *dsi, int channel,
+ omap_dsi_isr_t isr, void *arg, u32 mask)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
@@ -1105,17 +1079,16 @@ static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));
if (r == 0)
- _omap_dsi_set_irqs_vc(dsidev, channel);
+ _omap_dsi_set_irqs_vc(dsi, channel);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
-static int dsi_register_isr_cio(struct platform_device *dsidev,
- omap_dsi_isr_t isr, void *arg, u32 mask)
+static int dsi_register_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr,
+ void *arg, u32 mask)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
@@ -1125,17 +1098,16 @@ static int dsi_register_isr_cio(struct platform_device *dsidev,
ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
if (r == 0)
- _omap_dsi_set_irqs_cio(dsidev);
+ _omap_dsi_set_irqs_cio(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
-static int dsi_unregister_isr_cio(struct platform_device *dsidev,
- omap_dsi_isr_t isr, void *arg, u32 mask)
+static int dsi_unregister_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr,
+ void *arg, u32 mask)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
int r;
@@ -1145,18 +1117,18 @@ static int dsi_unregister_isr_cio(struct platform_device *dsidev,
ARRAY_SIZE(dsi->isr_tables.isr_table_cio));
if (r == 0)
- _omap_dsi_set_irqs_cio(dsidev);
+ _omap_dsi_set_irqs_cio(dsi);
spin_unlock_irqrestore(&dsi->irq_lock, flags);
return r;
}
-static u32 dsi_get_errors(struct platform_device *dsidev)
+static u32 dsi_get_errors(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
u32 e;
+
spin_lock_irqsave(&dsi->errors_lock, flags);
e = dsi->errors;
dsi->errors = 0;
@@ -1164,38 +1136,35 @@ static u32 dsi_get_errors(struct platform_device *dsidev)
return e;
}
-static int dsi_runtime_get(struct platform_device *dsidev)
+static int dsi_runtime_get(struct dsi_data *dsi)
{
int r;
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
DSSDBG("dsi_runtime_get\n");
- r = pm_runtime_get_sync(&dsi->pdev->dev);
+ r = pm_runtime_get_sync(dsi->dev);
WARN_ON(r < 0);
return r < 0 ? r : 0;
}
-static void dsi_runtime_put(struct platform_device *dsidev)
+static void dsi_runtime_put(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
DSSDBG("dsi_runtime_put\n");
- r = pm_runtime_put_sync(&dsi->pdev->dev);
+ r = pm_runtime_put_sync(dsi->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
-static int dsi_regulator_init(struct platform_device *dsidev)
+static int dsi_regulator_init(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct regulator *vdds_dsi;
if (dsi->vdds_dsi_reg != NULL)
return 0;
- vdds_dsi = devm_regulator_get(&dsi->pdev->dev, "vdd");
+ vdds_dsi = devm_regulator_get(dsi->dev, "vdd");
if (IS_ERR(vdds_dsi)) {
if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
@@ -1208,16 +1177,15 @@ static int dsi_regulator_init(struct platform_device *dsidev)
return 0;
}
-static void _dsi_print_reset_status(struct platform_device *dsidev)
+static void _dsi_print_reset_status(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 l;
int b0, b1, b2;
/* A dummy read using the SCP interface to any DSIPHY register is
* required after DSIPHY reset to complete the reset of the DSI complex
* I/O. */
- l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
+ l = dsi_read_reg(dsi, DSI_DSIPHY_CFG5);
if (dsi->data->quirks & DSI_QUIRK_REVERSE_TXCLKESC) {
b0 = 28;
@@ -1230,7 +1198,7 @@ static void _dsi_print_reset_status(struct platform_device *dsidev)
}
#define DSI_FLD_GET(fld, start, end)\
- FLD_GET(dsi_read_reg(dsidev, DSI_##fld), start, end)
+ FLD_GET(dsi_read_reg(dsi, DSI_##fld), start, end)
pr_debug("DSI resets: PLL (%d) CIO (%d) PHY (%x%x%x, %d, %d, %d)\n",
DSI_FLD_GET(PLL_STATUS, 0, 0),
@@ -1245,53 +1213,48 @@ static void _dsi_print_reset_status(struct platform_device *dsidev)
#undef DSI_FLD_GET
}
-static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
+static inline int dsi_if_enable(struct dsi_data *dsi, bool enable)
{
DSSDBG("dsi_if_enable(%d)\n", enable);
enable = enable ? 1 : 0;
- REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */
+ REG_FLD_MOD(dsi, DSI_CTRL, enable, 0, 0); /* IF_EN */
- if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) {
- DSSERR("Failed to set dsi_if_enable to %d\n", enable);
- return -EIO;
+ if (!wait_for_bit_change(dsi, DSI_CTRL, 0, enable)) {
+ DSSERR("Failed to set dsi_if_enable to %d\n", enable);
+ return -EIO;
}
return 0;
}
-static unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
+static unsigned long dsi_get_pll_hsdiv_dispc_rate(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
return dsi->pll.cinfo.clkout[HSDIV_DISPC];
}
-static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
+static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
return dsi->pll.cinfo.clkout[HSDIV_DSI];
}
-static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
+static unsigned long dsi_get_txbyteclkhs(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
return dsi->pll.cinfo.clkdco / 16;
}
-static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
+static unsigned long dsi_fclk_rate(struct dsi_data *dsi)
{
unsigned long r;
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ enum dss_clk_source source;
- if (dss_get_dsi_clk_source(dsi->module_id) == DSS_CLK_SRC_FCK) {
+ source = dss_get_dsi_clk_source(dsi->dss, dsi->module_id);
+ if (source == DSS_CLK_SRC_FCK) {
/* DSI FCLK source is DSS_CLK_FCK */
r = clk_get_rate(dsi->dss_clk);
} else {
/* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
- r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
+ r = dsi_get_pll_hsdiv_dsi_rate(dsi);
}
return r;
@@ -1301,7 +1264,7 @@ static int dsi_lp_clock_calc(unsigned long dsi_fclk,
unsigned long lp_clk_min, unsigned long lp_clk_max,
struct dsi_lp_clock_info *lp_cinfo)
{
- unsigned lp_clk_div;
+ unsigned int lp_clk_div;
unsigned long lp_clk;
lp_clk_div = DIV_ROUND_UP(dsi_fclk, lp_clk_max * 2);
@@ -1316,13 +1279,12 @@ static int dsi_lp_clock_calc(unsigned long dsi_fclk,
return 0;
}
-static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
+static int dsi_set_lp_clk_divisor(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long dsi_fclk;
- unsigned lp_clk_div;
+ unsigned int lp_clk_div;
unsigned long lp_clk;
- unsigned lpdiv_max = dsi->data->max_pll_lpdiv;
+ unsigned int lpdiv_max = dsi->data->max_pll_lpdiv;
lp_clk_div = dsi->user_lp_cinfo.lp_clk_div;
@@ -1330,7 +1292,7 @@ static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
if (lp_clk_div == 0 || lp_clk_div > lpdiv_max)
return -EINVAL;
- dsi_fclk = dsi_fclk_rate(dsidev);
+ dsi_fclk = dsi_fclk_rate(dsi);
lp_clk = dsi_fclk / 2 / lp_clk_div;
@@ -1339,29 +1301,25 @@ static int dsi_set_lp_clk_divisor(struct platform_device *dsidev)
dsi->current_lp_cinfo.lp_clk_div = lp_clk_div;
/* LP_CLK_DIVISOR */
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, lp_clk_div, 12, 0);
/* LP_RX_SYNCHRO_ENABLE */
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);
return 0;
}
-static void dsi_enable_scp_clk(struct platform_device *dsidev)
+static void dsi_enable_scp_clk(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
if (dsi->scp_clk_refcount++ == 0)
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
}
-static void dsi_disable_scp_clk(struct platform_device *dsidev)
+static void dsi_disable_scp_clk(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
WARN_ON(dsi->scp_clk_refcount == 0);
if (--dsi->scp_clk_refcount == 0)
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
}
enum dsi_pll_power_state {
@@ -1371,10 +1329,8 @@ enum dsi_pll_power_state {
DSI_PLL_POWER_ON_DIV = 0x3,
};
-static int dsi_pll_power(struct platform_device *dsidev,
- enum dsi_pll_power_state state)
+static int dsi_pll_power(struct dsi_data *dsi, enum dsi_pll_power_state state)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int t = 0;
/* DSI-PLL power command 0x3 is not working */
@@ -1383,10 +1339,10 @@ static int dsi_pll_power(struct platform_device *dsidev,
state = DSI_PLL_POWER_ON_ALL;
/* PLL_PWR_CMD */
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, state, 31, 30);
/* PLL_PWR_STATUS */
- while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
+ while (FLD_GET(dsi_read_reg(dsi, DSI_CLK_CTRL), 29, 28) != state) {
if (++t > 1000) {
DSSERR("Failed to set DSI PLL power mode to %d\n",
state);
@@ -1413,23 +1369,22 @@ static void dsi_pll_calc_dsi_fck(struct dsi_data *dsi,
static int dsi_pll_enable(struct dss_pll *pll)
{
struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
- struct platform_device *dsidev = dsi->pdev;
int r = 0;
DSSDBG("PLL init\n");
- r = dsi_regulator_init(dsidev);
+ r = dsi_regulator_init(dsi);
if (r)
return r;
- r = dsi_runtime_get(dsidev);
+ r = dsi_runtime_get(dsi);
if (r)
return r;
/*
* Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
*/
- dsi_enable_scp_clk(dsidev);
+ dsi_enable_scp_clk(dsi);
if (!dsi->vdds_dsi_enabled) {
r = regulator_enable(dsi->vdds_dsi_reg);
@@ -1439,20 +1394,20 @@ static int dsi_pll_enable(struct dss_pll *pll)
}
/* XXX PLL does not come out of reset without this... */
- dispc_pck_free_enable(1);
+ dispc_pck_free_enable(dsi->dss->dispc, 1);
- if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
+ if (!wait_for_bit_change(dsi, DSI_PLL_STATUS, 0, 1)) {
DSSERR("PLL not coming out of reset.\n");
r = -ENODEV;
- dispc_pck_free_enable(0);
+ dispc_pck_free_enable(dsi->dss->dispc, 0);
goto err1;
}
/* XXX ... but if left on, we get problems when planes do not
* fill the whole display. No idea about this */
- dispc_pck_free_enable(0);
+ dispc_pck_free_enable(dsi->dss->dispc, 0);
- r = dsi_pll_power(dsidev, DSI_PLL_POWER_ON_ALL);
+ r = dsi_pll_power(dsi, DSI_PLL_POWER_ON_ALL);
if (r)
goto err1;
@@ -1466,24 +1421,22 @@ err1:
dsi->vdds_dsi_enabled = false;
}
err0:
- dsi_disable_scp_clk(dsidev);
- dsi_runtime_put(dsidev);
+ dsi_disable_scp_clk(dsi);
+ dsi_runtime_put(dsi);
return r;
}
-static void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
+static void dsi_pll_uninit(struct dsi_data *dsi, bool disconnect_lanes)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
- dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
+ dsi_pll_power(dsi, DSI_PLL_POWER_OFF);
if (disconnect_lanes) {
WARN_ON(!dsi->vdds_dsi_enabled);
regulator_disable(dsi->vdds_dsi_reg);
dsi->vdds_dsi_enabled = false;
}
- dsi_disable_scp_clk(dsidev);
- dsi_runtime_put(dsidev);
+ dsi_disable_scp_clk(dsi);
+ dsi_runtime_put(dsi);
DSSDBG("PLL uninit done\n");
}
@@ -1491,24 +1444,21 @@ static void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes
static void dsi_pll_disable(struct dss_pll *pll)
{
struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
- struct platform_device *dsidev = dsi->pdev;
- dsi_pll_uninit(dsidev, true);
+ dsi_pll_uninit(dsi, true);
}
-static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
- struct seq_file *s)
+static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
enum dss_clk_source dispc_clk_src, dsi_clk_src;
int dsi_module = dsi->module_id;
struct dss_pll *pll = &dsi->pll;
- dispc_clk_src = dss_get_dispc_clk_source();
- dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
+ dispc_clk_src = dss_get_dispc_clk_source(dsi->dss);
+ dsi_clk_src = dss_get_dsi_clk_source(dsi->dss, dsi_module);
- if (dsi_runtime_get(dsidev))
+ if (dsi_runtime_get(dsi))
return;
seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
@@ -1543,35 +1493,33 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
seq_printf(s, "dsi fclk source = %s\n",
dss_get_clk_source_name(dsi_clk_src));
- seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
+ seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsi));
seq_printf(s, "DDR_CLK\t\t%lu\n",
cinfo->clkdco / 4);
- seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));
+ seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsi));
seq_printf(s, "LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk);
- dsi_runtime_put(dsidev);
+ dsi_runtime_put(dsi);
}
void dsi_dump_clocks(struct seq_file *s)
{
- struct platform_device *dsidev;
+ struct dsi_data *dsi;
int i;
for (i = 0; i < MAX_NUM_DSI; i++) {
- dsidev = dsi_get_dsidev_from_id(i);
- if (dsidev)
- dsi_dump_dsidev_clocks(dsidev, s);
+ dsi = dsi_get_dsi_from_id(i);
+ if (dsi)
+ dsi_dump_dsi_clocks(dsi, s);
}
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
- struct seq_file *s)
+static void dsi_dump_dsi_irqs(struct dsi_data *dsi, struct seq_file *s)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned long flags;
struct dsi_irq_stats stats;
@@ -1657,29 +1605,30 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
#undef PIS
}
-static void dsi1_dump_irqs(struct seq_file *s)
+static int dsi1_dump_irqs(struct seq_file *s, void *p)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
+ struct dsi_data *dsi = dsi_get_dsi_from_id(0);
- dsi_dump_dsidev_irqs(dsidev, s);
+ dsi_dump_dsi_irqs(dsi, s);
+ return 0;
}
-static void dsi2_dump_irqs(struct seq_file *s)
+static int dsi2_dump_irqs(struct seq_file *s, void *p)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
+ struct dsi_data *dsi = dsi_get_dsi_from_id(1);
- dsi_dump_dsidev_irqs(dsidev, s);
+ dsi_dump_dsi_irqs(dsi, s);
+ return 0;
}
#endif
-static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
- struct seq_file *s)
+static void dsi_dump_dsi_regs(struct dsi_data *dsi, struct seq_file *s)
{
-#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsi, r))
- if (dsi_runtime_get(dsidev))
+ if (dsi_runtime_get(dsi))
return;
- dsi_enable_scp_clk(dsidev);
+ dsi_enable_scp_clk(dsi);
DUMPREG(DSI_REVISION);
DUMPREG(DSI_SYSCONFIG);
@@ -1751,23 +1700,25 @@ static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
DUMPREG(DSI_PLL_CONFIGURATION1);
DUMPREG(DSI_PLL_CONFIGURATION2);
- dsi_disable_scp_clk(dsidev);
- dsi_runtime_put(dsidev);
+ dsi_disable_scp_clk(dsi);
+ dsi_runtime_put(dsi);
#undef DUMPREG
}
-static void dsi1_dump_regs(struct seq_file *s)
+static int dsi1_dump_regs(struct seq_file *s, void *p)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_id(0);
+ struct dsi_data *dsi = dsi_get_dsi_from_id(0);
- dsi_dump_dsidev_regs(dsidev, s);
+ dsi_dump_dsi_regs(dsi, s);
+ return 0;
}
-static void dsi2_dump_regs(struct seq_file *s)
+static int dsi2_dump_regs(struct seq_file *s, void *p)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_id(1);
+ struct dsi_data *dsi = dsi_get_dsi_from_id(1);
- dsi_dump_dsidev_regs(dsidev, s);
+ dsi_dump_dsi_regs(dsi, s);
+ return 0;
}
enum dsi_cio_power_state {
@@ -1776,16 +1727,15 @@ enum dsi_cio_power_state {
DSI_COMPLEXIO_POWER_ULPS = 0x2,
};
-static int dsi_cio_power(struct platform_device *dsidev,
- enum dsi_cio_power_state state)
+static int dsi_cio_power(struct dsi_data *dsi, enum dsi_cio_power_state state)
{
int t = 0;
/* PWR_CMD */
- REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);
+ REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG1, state, 28, 27);
/* PWR_STATUS */
- while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
+ while (FLD_GET(dsi_read_reg(dsi, DSI_COMPLEXIO_CFG1),
26, 25) != state) {
if (++t > 1000) {
DSSERR("failed to set complexio power state to "
@@ -1798,9 +1748,8 @@ static int dsi_cio_power(struct platform_device *dsidev,
return 0;
}
-static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
+static unsigned int dsi_get_line_buf_size(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int val;
/* line buffer on OMAP3 is 1024 x 24bits */
@@ -1810,7 +1759,7 @@ static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
if (!(dsi->data->quirks & DSI_QUIRK_GNQ))
return 1023 * 3;
- val = REG_GET(dsidev, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
+ val = REG_GET(dsi, DSI_GNQ, 14, 12); /* VP1_LINE_BUFFER_SIZE */
switch (val) {
case 1:
@@ -1833,9 +1782,8 @@ static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
}
}
-static int dsi_set_lane_config(struct platform_device *dsidev)
+static int dsi_set_lane_config(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
static const u8 offsets[] = { 0, 4, 8, 12, 16 };
static const enum dsi_lane_function functions[] = {
DSI_LANE_CLK,
@@ -1847,12 +1795,12 @@ static int dsi_set_lane_config(struct platform_device *dsidev)
u32 r;
int i;
- r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
+ r = dsi_read_reg(dsi, DSI_COMPLEXIO_CFG1);
for (i = 0; i < dsi->num_lanes_used; ++i) {
- unsigned offset = offsets[i];
- unsigned polarity, lane_number;
- unsigned t;
+ unsigned int offset = offsets[i];
+ unsigned int polarity, lane_number;
+ unsigned int t;
for (t = 0; t < dsi->num_lanes_supported; ++t)
if (dsi->lanes[t].function == functions[i])
@@ -1870,37 +1818,34 @@ static int dsi_set_lane_config(struct platform_device *dsidev)
/* clear the unused lanes */
for (; i < dsi->num_lanes_supported; ++i) {
- unsigned offset = offsets[i];
+ unsigned int offset = offsets[i];
r = FLD_MOD(r, 0, offset + 2, offset);
r = FLD_MOD(r, 0, offset + 3, offset + 3);
}
- dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
+ dsi_write_reg(dsi, DSI_COMPLEXIO_CFG1, r);
return 0;
}
-static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
+static inline unsigned int ns2ddr(struct dsi_data *dsi, unsigned int ns)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
/* convert time in ns to ddr ticks, rounding up */
unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
+
return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
}
-static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
+static inline unsigned int ddr2ns(struct dsi_data *dsi, unsigned int ddr)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
unsigned long ddr_clk = dsi->pll.cinfo.clkdco / 4;
+
return ddr * 1000 * 1000 / (ddr_clk / 1000);
}
-static void dsi_cio_timings(struct platform_device *dsidev)
+static void dsi_cio_timings(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
u32 tlpx_half, tclk_trail, tclk_zero;
@@ -1911,54 +1856,54 @@ static void dsi_cio_timings(struct platform_device *dsidev)
/* 1 * DDR_CLK = 2 * UI */
/* min 40ns + 4*UI max 85ns + 6*UI */
- ths_prepare = ns2ddr(dsidev, 70) + 2;
+ ths_prepare = ns2ddr(dsi, 70) + 2;
/* min 145ns + 10*UI */
- ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;
+ ths_prepare_ths_zero = ns2ddr(dsi, 175) + 2;
/* min max(8*UI, 60ns+4*UI) */
- ths_trail = ns2ddr(dsidev, 60) + 5;
+ ths_trail = ns2ddr(dsi, 60) + 5;
/* min 100ns */
- ths_exit = ns2ddr(dsidev, 145);
+ ths_exit = ns2ddr(dsi, 145);
/* tlpx min 50n */
- tlpx_half = ns2ddr(dsidev, 25);
+ tlpx_half = ns2ddr(dsi, 25);
/* min 60ns */
- tclk_trail = ns2ddr(dsidev, 60) + 2;
+ tclk_trail = ns2ddr(dsi, 60) + 2;
/* min 38ns, max 95ns */
- tclk_prepare = ns2ddr(dsidev, 65);
+ tclk_prepare = ns2ddr(dsi, 65);
/* min tclk-prepare + tclk-zero = 300ns */
- tclk_zero = ns2ddr(dsidev, 260);
+ tclk_zero = ns2ddr(dsi, 260);
DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
- ths_prepare, ddr2ns(dsidev, ths_prepare),
- ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
+ ths_prepare, ddr2ns(dsi, ths_prepare),
+ ths_prepare_ths_zero, ddr2ns(dsi, ths_prepare_ths_zero));
DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
- ths_trail, ddr2ns(dsidev, ths_trail),
- ths_exit, ddr2ns(dsidev, ths_exit));
+ ths_trail, ddr2ns(dsi, ths_trail),
+ ths_exit, ddr2ns(dsi, ths_exit));
DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
"tclk_zero %u (%uns)\n",
- tlpx_half, ddr2ns(dsidev, tlpx_half),
- tclk_trail, ddr2ns(dsidev, tclk_trail),
- tclk_zero, ddr2ns(dsidev, tclk_zero));
+ tlpx_half, ddr2ns(dsi, tlpx_half),
+ tclk_trail, ddr2ns(dsi, tclk_trail),
+ tclk_zero, ddr2ns(dsi, tclk_zero));
DSSDBG("tclk_prepare %u (%uns)\n",
- tclk_prepare, ddr2ns(dsidev, tclk_prepare));
+ tclk_prepare, ddr2ns(dsi, tclk_prepare));
/* program timings */
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG0);
r = FLD_MOD(r, ths_prepare, 31, 24);
r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
r = FLD_MOD(r, ths_trail, 15, 8);
r = FLD_MOD(r, ths_exit, 7, 0);
- dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);
+ dsi_write_reg(dsi, DSI_DSIPHY_CFG0, r);
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
r = FLD_MOD(r, tlpx_half, 20, 16);
r = FLD_MOD(r, tclk_trail, 15, 8);
r = FLD_MOD(r, tclk_zero, 7, 0);
@@ -1969,18 +1914,18 @@ static void dsi_cio_timings(struct platform_device *dsidev)
r = FLD_MOD(r, 1, 23, 23); /* CLKINP_SEL = enable */
}
- dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);
+ dsi_write_reg(dsi, DSI_DSIPHY_CFG1, r);
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2);
r = FLD_MOD(r, tclk_prepare, 7, 0);
- dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
+ dsi_write_reg(dsi, DSI_DSIPHY_CFG2, r);
}
/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */
-static void dsi_cio_enable_lane_override(struct platform_device *dsidev,
- unsigned mask_p, unsigned mask_n)
+static void dsi_cio_enable_lane_override(struct dsi_data *dsi,
+ unsigned int mask_p,
+ unsigned int mask_n)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int i;
u32 l;
u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26;
@@ -1988,7 +1933,7 @@ static void dsi_cio_enable_lane_override(struct platform_device *dsidev,
l = 0;
for (i = 0; i < dsi->num_lanes_supported; ++i) {
- unsigned p = dsi->lanes[i].polarity;
+ unsigned int p = dsi->lanes[i].polarity;
if (mask_p & (1 << i))
l |= 1 << (i * 2 + (p ? 0 : 1));
@@ -2009,26 +1954,25 @@ static void dsi_cio_enable_lane_override(struct platform_device *dsidev,
/* Set the lane override configuration */
/* REGLPTXSCPDAT4TO0DXDY */
- REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
+ REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);
/* Enable lane override */
/* ENLPTXSCPDAT */
- REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
+ REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 1, 27, 27);
}
-static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
+static void dsi_cio_disable_lane_override(struct dsi_data *dsi)
{
/* Disable lane override */
- REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
+ REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
/* Reset the lane override configuration */
/* REGLPTXSCPDAT4TO0DXDY */
- REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
+ REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 22, 17);
}
-static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
+static int dsi_cio_wait_tx_clk_esc_reset(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int t, i;
bool in_use[DSI_MAX_NR_LANES];
static const u8 offsets_old[] = { 28, 27, 26 };
@@ -2048,7 +1992,7 @@ static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
u32 l;
int ok;
- l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
+ l = dsi_read_reg(dsi, DSI_DSIPHY_CFG5);
ok = 0;
for (i = 0; i < dsi->num_lanes_supported; ++i) {
@@ -2075,10 +2019,9 @@ static int dsi_cio_wait_tx_clk_esc_reset(struct platform_device *dsidev)
}
/* return bitmask of enabled lanes, lane0 being the lsb */
-static unsigned dsi_get_lane_mask(struct platform_device *dsidev)
+static unsigned int dsi_get_lane_mask(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- unsigned mask = 0;
+ unsigned int mask = 0;
int i;
for (i = 0; i < dsi->num_lanes_supported; ++i) {
@@ -2166,45 +2109,44 @@ static void dsi_disable_pads(struct dsi_data *dsi)
dsi_omap5_mux_pads(dsi, 0);
}
-static int dsi_cio_init(struct platform_device *dsidev)
+static int dsi_cio_init(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
u32 l;
DSSDBG("DSI CIO init starts");
- r = dsi_enable_pads(dsi, dsi_get_lane_mask(dsidev));
+ r = dsi_enable_pads(dsi, dsi_get_lane_mask(dsi));
if (r)
return r;
- dsi_enable_scp_clk(dsidev);
+ dsi_enable_scp_clk(dsi);
/* A dummy read using the SCP interface to any DSIPHY register is
* required after DSIPHY reset to complete the reset of the DSI complex
* I/O. */
- dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
+ dsi_read_reg(dsi, DSI_DSIPHY_CFG5);
- if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) {
+ if (!wait_for_bit_change(dsi, DSI_DSIPHY_CFG5, 30, 1)) {
DSSERR("CIO SCP Clock domain not coming out of reset.\n");
r = -EIO;
goto err_scp_clk_dom;
}
- r = dsi_set_lane_config(dsidev);
+ r = dsi_set_lane_config(dsi);
if (r)
goto err_scp_clk_dom;
/* set TX STOP MODE timer to maximum for this operation */
- l = dsi_read_reg(dsidev, DSI_TIMING1);
+ l = dsi_read_reg(dsi, DSI_TIMING1);
l = FLD_MOD(l, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
l = FLD_MOD(l, 1, 14, 14); /* STOP_STATE_X16_IO */
l = FLD_MOD(l, 1, 13, 13); /* STOP_STATE_X4_IO */
l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
- dsi_write_reg(dsidev, DSI_TIMING1, l);
+ dsi_write_reg(dsi, DSI_TIMING1, l);
if (dsi->ulps_enabled) {
- unsigned mask_p;
+ unsigned int mask_p;
int i;
DSSDBG("manual ulps exit\n");
@@ -2226,24 +2168,24 @@ static int dsi_cio_init(struct platform_device *dsidev)
mask_p |= 1 << i;
}
- dsi_cio_enable_lane_override(dsidev, mask_p, 0);
+ dsi_cio_enable_lane_override(dsi, mask_p, 0);
}
- r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
+ r = dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ON);
if (r)
goto err_cio_pwr;
- if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
+ if (!wait_for_bit_change(dsi, DSI_COMPLEXIO_CFG1, 29, 1)) {
DSSERR("CIO PWR clock domain not coming out of reset.\n");
r = -ENODEV;
goto err_cio_pwr_dom;
}
- dsi_if_enable(dsidev, true);
- dsi_if_enable(dsidev, false);
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
+ dsi_if_enable(dsi, true);
+ dsi_if_enable(dsi, false);
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */
- r = dsi_cio_wait_tx_clk_esc_reset(dsidev);
+ r = dsi_cio_wait_tx_clk_esc_reset(dsi);
if (r)
goto err_tx_clk_esc_rst;
@@ -2255,17 +2197,17 @@ static int dsi_cio_init(struct platform_device *dsidev)
/* Disable the override. The lanes should be set to Mark-11
* state by the HW */
- dsi_cio_disable_lane_override(dsidev);
+ dsi_cio_disable_lane_override(dsi);
}
/* FORCE_TX_STOP_MODE_IO */
- REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15);
+ REG_FLD_MOD(dsi, DSI_TIMING1, 0, 15, 15);
- dsi_cio_timings(dsidev);
+ dsi_cio_timings(dsi);
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
/* DDR_CLK_ALWAYS_ON */
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL,
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL,
dsi->vm_timings.ddr_clk_always_on, 13, 13);
}
@@ -2276,35 +2218,32 @@ static int dsi_cio_init(struct platform_device *dsidev)
return 0;
err_tx_clk_esc_rst:
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
err_cio_pwr_dom:
- dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
+ dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_OFF);
err_cio_pwr:
if (dsi->ulps_enabled)
- dsi_cio_disable_lane_override(dsidev);
+ dsi_cio_disable_lane_override(dsi);
err_scp_clk_dom:
- dsi_disable_scp_clk(dsidev);
+ dsi_disable_scp_clk(dsi);
dsi_disable_pads(dsi);
return r;
}
-static void dsi_cio_uninit(struct platform_device *dsidev)
+static void dsi_cio_uninit(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
/* DDR_CLK_ALWAYS_ON */
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 13, 13);
- dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
- dsi_disable_scp_clk(dsidev);
+ dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_OFF);
+ dsi_disable_scp_clk(dsi);
dsi_disable_pads(dsi);
}
-static void dsi_config_tx_fifo(struct platform_device *dsidev,
- enum fifo_size size1, enum fifo_size size2,
- enum fifo_size size3, enum fifo_size size4)
+static void dsi_config_tx_fifo(struct dsi_data *dsi,
+ enum fifo_size size1, enum fifo_size size2,
+ enum fifo_size size3, enum fifo_size size4)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r = 0;
int add = 0;
int i;
@@ -2330,14 +2269,13 @@ static void dsi_config_tx_fifo(struct platform_device *dsidev,
add += size;
}
- dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
+ dsi_write_reg(dsi, DSI_TX_FIFO_VC_SIZE, r);
}
-static void dsi_config_rx_fifo(struct platform_device *dsidev,
+static void dsi_config_rx_fifo(struct dsi_data *dsi,
enum fifo_size size1, enum fifo_size size2,
enum fifo_size size3, enum fifo_size size4)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r = 0;
int add = 0;
int i;
@@ -2363,18 +2301,18 @@ static void dsi_config_rx_fifo(struct platform_device *dsidev,
add += size;
}
- dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r);
+ dsi_write_reg(dsi, DSI_RX_FIFO_VC_SIZE, r);
}
-static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
+static int dsi_force_tx_stop_mode_io(struct dsi_data *dsi)
{
u32 r;
- r = dsi_read_reg(dsidev, DSI_TIMING1);
+ r = dsi_read_reg(dsi, DSI_TIMING1);
r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
- dsi_write_reg(dsidev, DSI_TIMING1, r);
+ dsi_write_reg(dsi, DSI_TIMING1, r);
- if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) {
+ if (!wait_for_bit_change(dsi, DSI_TIMING1, 15, 0)) {
DSSERR("TX_STOP bit not going down\n");
return -EIO;
}
@@ -2382,29 +2320,28 @@ static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
return 0;
}
-static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
+static bool dsi_vc_is_enabled(struct dsi_data *dsi, int channel)
{
- return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0);
+ return REG_GET(dsi, DSI_VC_CTRL(channel), 0, 0);
}
static void dsi_packet_sent_handler_vp(void *data, u32 mask)
{
struct dsi_packet_sent_handler_data *vp_data =
(struct dsi_packet_sent_handler_data *) data;
- struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev);
+ struct dsi_data *dsi = vp_data->dsi;
const int channel = dsi->update_channel;
u8 bit = dsi->te_enabled ? 30 : 31;
- if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0)
+ if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit) == 0)
complete(vp_data->completion);
}
-static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
+static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
DECLARE_COMPLETION_ONSTACK(completion);
struct dsi_packet_sent_handler_data vp_data = {
- .dsidev = dsidev,
+ .dsi = dsi,
.completion = &completion
};
int r = 0;
@@ -2412,13 +2349,13 @@ static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
bit = dsi->te_enabled ? 30 : 31;
- r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
+ r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
if (r)
goto err0;
/* Wait for completion only if TE_EN/TE_START is still set */
- if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
+ if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit)) {
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(10)) == 0) {
DSSERR("Failed to complete previous frame transfer\n");
@@ -2427,12 +2364,12 @@ static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
}
}
- dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
+ dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
return 0;
err1:
- dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
+ dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp,
&vp_data, DSI_VC_IRQ_PACKET_SENT);
err0:
return r;
@@ -2442,29 +2379,29 @@ static void dsi_packet_sent_handler_l4(void *data, u32 mask)
{
struct dsi_packet_sent_handler_data *l4_data =
(struct dsi_packet_sent_handler_data *) data;
- struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev);
+ struct dsi_data *dsi = l4_data->dsi;
const int channel = dsi->update_channel;
- if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
+ if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5) == 0)
complete(l4_data->completion);
}
-static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
+static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel)
{
DECLARE_COMPLETION_ONSTACK(completion);
struct dsi_packet_sent_handler_data l4_data = {
- .dsidev = dsidev,
+ .dsi = dsi,
.completion = &completion
};
int r = 0;
- r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
+ r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
if (r)
goto err0;
/* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
- if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
+ if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5)) {
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(10)) == 0) {
DSSERR("Failed to complete previous l4 transfer\n");
@@ -2473,66 +2410,61 @@ static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
}
}
- dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
+ dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
return 0;
err1:
- dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
+ dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4,
&l4_data, DSI_VC_IRQ_PACKET_SENT);
err0:
return r;
}
-static int dsi_sync_vc(struct platform_device *dsidev, int channel)
+static int dsi_sync_vc(struct dsi_data *dsi, int channel)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
WARN_ON(in_interrupt());
- if (!dsi_vc_is_enabled(dsidev, channel))
+ if (!dsi_vc_is_enabled(dsi, channel))
return 0;
switch (dsi->vc[channel].source) {
case DSI_VC_SOURCE_VP:
- return dsi_sync_vc_vp(dsidev, channel);
+ return dsi_sync_vc_vp(dsi, channel);
case DSI_VC_SOURCE_L4:
- return dsi_sync_vc_l4(dsidev, channel);
+ return dsi_sync_vc_l4(dsi, channel);
default:
BUG();
return -EINVAL;
}
}
-static int dsi_vc_enable(struct platform_device *dsidev, int channel,
- bool enable)
+static int dsi_vc_enable(struct dsi_data *dsi, int channel, bool enable)
{
DSSDBG("dsi_vc_enable channel %d, enable %d\n",
channel, enable);
enable = enable ? 1 : 0;
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 0, 0);
- if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel),
- 0, enable) != enable) {
- DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
- return -EIO;
+ if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 0, enable)) {
+ DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
+ return -EIO;
}
return 0;
}
-static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
+static void dsi_vc_initial_config(struct dsi_data *dsi, int channel)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
DSSDBG("Initial config of virtual channel %d", channel);
- r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
+ r = dsi_read_reg(dsi, DSI_VC_CTRL(channel));
if (FLD_GET(r, 15, 15)) /* VC_BUSY */
DSSERR("VC(%d) busy when trying to configure it!\n",
@@ -2551,41 +2483,39 @@ static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */
- dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
+ dsi_write_reg(dsi, DSI_VC_CTRL(channel), r);
dsi->vc[channel].source = DSI_VC_SOURCE_L4;
}
-static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
- enum dsi_vc_source source)
+static int dsi_vc_config_source(struct dsi_data *dsi, int channel,
+ enum dsi_vc_source source)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
if (dsi->vc[channel].source == source)
return 0;
DSSDBG("Source config of virtual channel %d", channel);
- dsi_sync_vc(dsidev, channel);
+ dsi_sync_vc(dsi, channel);
- dsi_vc_enable(dsidev, channel, 0);
+ dsi_vc_enable(dsi, channel, 0);
/* VC_BUSY */
- if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
+ if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 15, 0)) {
DSSERR("vc(%d) busy when trying to config for VP\n", channel);
return -EIO;
}
/* SOURCE, 0 = L4, 1 = video port */
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), source, 1, 1);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), source, 1, 1);
/* DCS_CMD_ENABLE */
if (dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) {
bool enable = source == DSI_VC_SOURCE_VP;
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 30, 30);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 30, 30);
}
- dsi_vc_enable(dsidev, channel, 1);
+ dsi_vc_enable(dsi, channel, 1);
dsi->vc[channel].source = source;
@@ -2595,33 +2525,32 @@ static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
bool enable)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
- dsi_vc_enable(dsidev, channel, 0);
- dsi_if_enable(dsidev, 0);
+ dsi_vc_enable(dsi, channel, 0);
+ dsi_if_enable(dsi, 0);
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 9, 9);
- dsi_vc_enable(dsidev, channel, 1);
- dsi_if_enable(dsidev, 1);
+ dsi_vc_enable(dsi, channel, 1);
+ dsi_if_enable(dsi, 1);
- dsi_force_tx_stop_mode_io(dsidev);
+ dsi_force_tx_stop_mode_io(dsi);
/* start the DDR clock by sending a NULL packet */
if (dsi->vm_timings.ddr_clk_always_on && enable)
- dsi_vc_send_null(dssdev, channel);
+ dsi_vc_send_null(dsi, channel);
}
-static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
+static void dsi_vc_flush_long_data(struct dsi_data *dsi, int channel)
{
- while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
+ while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
u32 val;
- val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
(val >> 0) & 0xff,
(val >> 8) & 0xff,
@@ -2667,14 +2596,13 @@ static void dsi_show_rx_ack_with_err(u16 err)
DSSERR("\t\tDSI Protocol Violation\n");
}
-static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
- int channel)
+static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel)
{
/* RX_FIFO_NOT_EMPTY */
- while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
+ while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
u32 val;
u8 dt;
- val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
DSSERR("\trawval %#08x\n", val);
dt = FLD_GET(val, 5, 0);
if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
@@ -2689,7 +2617,7 @@ static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
} else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
DSSERR("\tDCS long response, len %d\n",
FLD_GET(val, 23, 8));
- dsi_vc_flush_long_data(dsidev, channel);
+ dsi_vc_flush_long_data(dsi, channel);
} else {
DSSERR("\tunknown datatype 0x%02x\n", dt);
}
@@ -2697,47 +2625,45 @@ static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
return 0;
}
-static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
+static int dsi_vc_send_bta(struct dsi_data *dsi, int channel)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
if (dsi->debug_write || dsi->debug_read)
DSSDBG("dsi_vc_send_bta %d\n", channel);
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
/* RX_FIFO_NOT_EMPTY */
- if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
+ if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
- dsi_vc_flush_receive_data(dsidev, channel);
+ dsi_vc_flush_receive_data(dsi, channel);
}
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
/* flush posted write */
- dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
+ dsi_read_reg(dsi, DSI_VC_CTRL(channel));
return 0;
}
static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
DECLARE_COMPLETION_ONSTACK(completion);
int r = 0;
u32 err;
- r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
+ r = dsi_register_isr_vc(dsi, channel, dsi_completion_handler,
&completion, DSI_VC_IRQ_BTA);
if (r)
goto err0;
- r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
+ r = dsi_register_isr(dsi, dsi_completion_handler, &completion,
DSI_IRQ_ERROR_MASK);
if (r)
goto err1;
- r = dsi_vc_send_bta(dsidev, channel);
+ r = dsi_vc_send_bta(dsi, channel);
if (r)
goto err2;
@@ -2748,41 +2674,40 @@ static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
goto err2;
}
- err = dsi_get_errors(dsidev);
+ err = dsi_get_errors(dsi);
if (err) {
DSSERR("Error while sending BTA: %x\n", err);
r = -EIO;
goto err2;
}
err2:
- dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
+ dsi_unregister_isr(dsi, dsi_completion_handler, &completion,
DSI_IRQ_ERROR_MASK);
err1:
- dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
+ dsi_unregister_isr_vc(dsi, channel, dsi_completion_handler,
&completion, DSI_VC_IRQ_BTA);
err0:
return r;
}
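/*
 * The dsi_completion_handler() registered above is outside this hunk;
 * it is presumably just the usual "complete a struct completion from
 * the ISR callback" idiom, roughly:
 */
static void dsi_completion_handler(void *data, u32 mask)
{
	complete((struct completion *)data);
}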
-static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
- int channel, u8 data_type, u16 len, u8 ecc)
+static inline void dsi_vc_write_long_header(struct dsi_data *dsi, int channel,
+ u8 data_type, u16 len, u8 ecc)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 val;
u8 data_id;
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
data_id = data_type | dsi->vc[channel].vc_id << 6;
val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
FLD_VAL(ecc, 31, 24);
- dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val);
+ dsi_write_reg(dsi, DSI_VC_LONG_PACKET_HEADER(channel), val);
}
-static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
- int channel, u8 b1, u8 b2, u8 b3, u8 b4)
+static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int channel,
+ u8 b1, u8 b2, u8 b3, u8 b4)
{
u32 val;
@@ -2791,14 +2716,13 @@ static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
/* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
b1, b2, b3, b4, val); */
- dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
+ dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
}
-static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
- u8 data_type, u8 *data, u16 len, u8 ecc)
+static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type,
+ u8 *data, u16 len, u8 ecc)
{
/*u32 val; */
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int i;
u8 *p;
int r = 0;
@@ -2813,9 +2737,9 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
return -EINVAL;
}
- dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
+ dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4);
- dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);
+ dsi_vc_write_long_header(dsi, channel, data_type, len, ecc);
p = data;
for (i = 0; i < len >> 2; i++) {
@@ -2827,7 +2751,7 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
b3 = *p++;
b4 = *p++;
- dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
+ dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, b4);
}
i = len % 4;
@@ -2852,29 +2776,28 @@ static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
break;
}
- dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
+ dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, 0);
}
return r;
}
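/*
 * Illustrative sketch (not driver code) of the payload packing done by
 * dsi_vc_send_long()/dsi_vc_write_long_payload() above: bytes go out
 * little-endian, four per 32-bit FIFO word, and a trailing 1-3 byte
 * remainder is zero padded.
 */
static void pack_long_payload(const u8 *data, u16 len,
			      void (*emit_word)(u32 word))
{
	u16 i;
	u16 rem = len % 4;

	for (i = 0; i + 4 <= len; i += 4)
		emit_word(data[i] | data[i + 1] << 8 |
			  data[i + 2] << 16 | data[i + 3] << 24);

	if (rem) {
		u32 w = 0;

		for (i = 0; i < rem; i++)
			w |= data[len - rem + i] << (8 * i);
		emit_word(w);	/* last word carries the zero-padded tail */
	}
}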
-static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
- u8 data_type, u16 data, u8 ecc)
+static int dsi_vc_send_short(struct dsi_data *dsi, int channel, u8 data_type,
+ u16 data, u8 ecc)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
u8 data_id;
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
if (dsi->debug_write)
DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
channel,
data_type, data & 0xff, (data >> 8) & 0xff);
- dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);
+ dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4);
- if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
+ if (FLD_GET(dsi_read_reg(dsi, DSI_VC_CTRL(channel)), 16, 16)) {
DSSERR("ERROR FIFO FULL, aborting transfer\n");
return -EINVAL;
}
@@ -2883,41 +2806,39 @@ static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
r = (data_id << 0) | (data << 8) | (ecc << 24);
- dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);
+ dsi_write_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel), r);
return 0;
}
-static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
+static int dsi_vc_send_null(struct dsi_data *dsi, int channel)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
-
- return dsi_vc_send_long(dsidev, channel, MIPI_DSI_NULL_PACKET, NULL,
- 0, 0);
+ return dsi_vc_send_long(dsi, channel, MIPI_DSI_NULL_PACKET, NULL, 0, 0);
}
-static int dsi_vc_write_nosync_common(struct platform_device *dsidev,
- int channel, u8 *data, int len, enum dss_dsi_content_type type)
+static int dsi_vc_write_nosync_common(struct dsi_data *dsi, int channel,
+ u8 *data, int len,
+ enum dss_dsi_content_type type)
{
int r;
if (len == 0) {
BUG_ON(type == DSS_DSI_CONTENT_DCS);
- r = dsi_vc_send_short(dsidev, channel,
+ r = dsi_vc_send_short(dsi, channel,
MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0);
} else if (len == 1) {
- r = dsi_vc_send_short(dsidev, channel,
+ r = dsi_vc_send_short(dsi, channel,
type == DSS_DSI_CONTENT_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
} else if (len == 2) {
- r = dsi_vc_send_short(dsidev, channel,
+ r = dsi_vc_send_short(dsi, channel,
type == DSS_DSI_CONTENT_GENERIC ?
MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
MIPI_DSI_DCS_SHORT_WRITE_PARAM,
data[0] | (data[1] << 8), 0);
} else {
- r = dsi_vc_send_long(dsidev, channel,
+ r = dsi_vc_send_long(dsi, channel,
type == DSS_DSI_CONTENT_GENERIC ?
MIPI_DSI_GENERIC_LONG_WRITE :
MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
@@ -2929,28 +2850,29 @@ static int dsi_vc_write_nosync_common(struct platform_device *dsidev,
static int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
u8 *data, int len)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
- return dsi_vc_write_nosync_common(dsidev, channel, data, len,
+ return dsi_vc_write_nosync_common(dsi, channel, data, len,
DSS_DSI_CONTENT_DCS);
}
static int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
u8 *data, int len)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
- return dsi_vc_write_nosync_common(dsidev, channel, data, len,
+ return dsi_vc_write_nosync_common(dsi, channel, data, len,
DSS_DSI_CONTENT_GENERIC);
}
-static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len, enum dss_dsi_content_type type)
+static int dsi_vc_write_common(struct omap_dss_device *dssdev,
+ int channel, u8 *data, int len,
+ enum dss_dsi_content_type type)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_vc_write_nosync_common(dsidev, channel, data, len, type);
+ r = dsi_vc_write_nosync_common(dsi, channel, data, len, type);
if (r)
goto err;
@@ -2959,9 +2881,9 @@ static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
goto err;
/* RX_FIFO_NOT_EMPTY */
- if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
+ if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) {
DSSERR("rx fifo not empty after write, dumping data:\n");
- dsi_vc_flush_receive_data(dsidev, channel);
+ dsi_vc_flush_receive_data(dsi, channel);
r = -EIO;
goto err;
}
@@ -2987,17 +2909,16 @@ static int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8
DSS_DSI_CONTENT_GENERIC);
}
-static int dsi_vc_dcs_send_read_request(struct platform_device *dsidev,
- int channel, u8 dcs_cmd)
+static int dsi_vc_dcs_send_read_request(struct dsi_data *dsi, int channel,
+ u8 dcs_cmd)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
if (dsi->debug_read)
DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n",
channel, dcs_cmd);
- r = dsi_vc_send_short(dsidev, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
+ r = dsi_vc_send_short(dsi, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
if (r) {
DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)"
" failed\n", channel, dcs_cmd);
@@ -3007,10 +2928,9 @@ static int dsi_vc_dcs_send_read_request(struct platform_device *dsidev,
return 0;
}
-static int dsi_vc_generic_send_read_request(struct platform_device *dsidev,
- int channel, u8 *reqdata, int reqlen)
+static int dsi_vc_generic_send_read_request(struct dsi_data *dsi, int channel,
+ u8 *reqdata, int reqlen)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u16 data;
u8 data_type;
int r;
@@ -3033,7 +2953,7 @@ static int dsi_vc_generic_send_read_request(struct platform_device *dsidev,
return -EINVAL;
}
- r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
+ r = dsi_vc_send_short(dsi, channel, data_type, data, 0);
if (r) {
DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)"
" failed\n", channel, reqlen);
@@ -3043,22 +2963,21 @@ static int dsi_vc_generic_send_read_request(struct platform_device *dsidev,
return 0;
}
-static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
- u8 *buf, int buflen, enum dss_dsi_content_type type)
+static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf,
+ int buflen, enum dss_dsi_content_type type)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 val;
u8 dt;
int r;
/* RX_FIFO_NOT_EMPTY */
- if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20) == 0) {
+ if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20) == 0) {
DSSERR("RX fifo empty when trying to read.\n");
r = -EIO;
goto err;
}
- val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
+ val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel));
if (dsi->debug_read)
DSSDBG("\theader: %08x\n", val);
dt = FLD_GET(val, 5, 0);
@@ -3121,7 +3040,7 @@ static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
/* two byte checksum ends the packet, not included in len */
for (w = 0; w < len + 2;) {
int b;
- val = dsi_read_reg(dsidev,
+ val = dsi_read_reg(dsi,
DSI_VC_SHORT_PACKET_HEADER(channel));
if (dsi->debug_read)
DSSDBG("\t\t%02x %02x %02x %02x\n",
@@ -3155,10 +3074,10 @@ err:
static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
u8 *buf, int buflen)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_vc_dcs_send_read_request(dsidev, channel, dcs_cmd);
+ r = dsi_vc_dcs_send_read_request(dsi, channel, dcs_cmd);
if (r)
goto err;
@@ -3166,7 +3085,7 @@ static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_c
if (r)
goto err;
- r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
+ r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen,
DSS_DSI_CONTENT_DCS);
if (r < 0)
goto err;
@@ -3185,10 +3104,10 @@ err:
static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_vc_generic_send_read_request(dsidev, channel, reqdata, reqlen);
+ r = dsi_vc_generic_send_read_request(dsi, channel, reqdata, reqlen);
if (r)
return r;
@@ -3196,7 +3115,7 @@ static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
if (r)
return r;
- r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
+ r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen,
DSS_DSI_CONTENT_GENERIC);
if (r < 0)
return r;
@@ -3212,22 +3131,21 @@ static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
static int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
u16 len)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
- return dsi_vc_send_short(dsidev, channel,
+ return dsi_vc_send_short(dsi, channel,
MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0);
}
-static int dsi_enter_ulps(struct platform_device *dsidev)
+static int dsi_enter_ulps(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
DECLARE_COMPLETION_ONSTACK(completion);
int r, i;
- unsigned mask;
+ unsigned int mask;
DSSDBG("Entering ULPS");
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
WARN_ON(dsi->ulps_enabled);
@@ -3235,35 +3153,35 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
return 0;
/* DDR_CLK_ALWAYS_ON */
- if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
- dsi_if_enable(dsidev, 0);
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
- dsi_if_enable(dsidev, 1);
+ if (REG_GET(dsi, DSI_CLK_CTRL, 13, 13)) {
+ dsi_if_enable(dsi, 0);
+ REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 13, 13);
+ dsi_if_enable(dsi, 1);
}
- dsi_sync_vc(dsidev, 0);
- dsi_sync_vc(dsidev, 1);
- dsi_sync_vc(dsidev, 2);
- dsi_sync_vc(dsidev, 3);
+ dsi_sync_vc(dsi, 0);
+ dsi_sync_vc(dsi, 1);
+ dsi_sync_vc(dsi, 2);
+ dsi_sync_vc(dsi, 3);
- dsi_force_tx_stop_mode_io(dsidev);
+ dsi_force_tx_stop_mode_io(dsi);
- dsi_vc_enable(dsidev, 0, false);
- dsi_vc_enable(dsidev, 1, false);
- dsi_vc_enable(dsidev, 2, false);
- dsi_vc_enable(dsidev, 3, false);
+ dsi_vc_enable(dsi, 0, false);
+ dsi_vc_enable(dsi, 1, false);
+ dsi_vc_enable(dsi, 2, false);
+ dsi_vc_enable(dsi, 3, false);
- if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
+ if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */
DSSERR("HS busy when enabling ULPS\n");
return -EIO;
}
- if (REG_GET(dsidev, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
+ if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */
DSSERR("LP busy when enabling ULPS\n");
return -EIO;
}
- r = dsi_register_isr_cio(dsidev, dsi_completion_handler, &completion,
+ r = dsi_register_isr_cio(dsi, dsi_completion_handler, &completion,
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
if (r)
return r;
@@ -3277,10 +3195,10 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
}
/* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
/* LANEx_ULPS_SIG2 */
- REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, mask, 9, 5);
+ REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, mask, 9, 5);
/* flush posted write and wait for SCP interface to finish the write */
- dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
+ dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2);
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(1000)) == 0) {
@@ -3289,31 +3207,31 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
goto err;
}
- dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
+ dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion,
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
/* Reset LANEx_ULPS_SIG2 */
- REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, 0, 9, 5);
+ REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, 0, 9, 5);
/* flush posted write and wait for SCP interface to finish the write */
- dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
+ dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2);
- dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
+ dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ULPS);
- dsi_if_enable(dsidev, false);
+ dsi_if_enable(dsi, false);
dsi->ulps_enabled = true;
return 0;
err:
- dsi_unregister_isr_cio(dsidev, dsi_completion_handler, &completion,
+ dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion,
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
return r;
}
-static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
- unsigned ticks, bool x4, bool x16)
+static void dsi_set_lp_rx_timeout(struct dsi_data *dsi, unsigned int ticks,
+ bool x4, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
@@ -3322,14 +3240,14 @@ static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
BUG_ON(ticks > 0x1fff);
/* ticks in DSI_FCK */
- fck = dsi_fclk_rate(dsidev);
+ fck = dsi_fclk_rate(dsi);
- r = dsi_read_reg(dsidev, DSI_TIMING2);
+ r = dsi_read_reg(dsi, DSI_TIMING2);
r = FLD_MOD(r, 1, 15, 15); /* LP_RX_TO */
r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* LP_RX_TO_X16 */
r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* LP_RX_TO_X4 */
r = FLD_MOD(r, ticks, 12, 0); /* LP_RX_COUNTER */
- dsi_write_reg(dsidev, DSI_TIMING2, r);
+ dsi_write_reg(dsi, DSI_TIMING2, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
@@ -3339,8 +3257,8 @@ static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
(total_ticks * 1000) / (fck / 1000 / 1000));
}
-static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
- bool x8, bool x16)
+static void dsi_set_ta_timeout(struct dsi_data *dsi, unsigned int ticks,
+ bool x8, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
@@ -3349,14 +3267,14 @@ static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
BUG_ON(ticks > 0x1fff);
/* ticks in DSI_FCK */
- fck = dsi_fclk_rate(dsidev);
+ fck = dsi_fclk_rate(dsi);
- r = dsi_read_reg(dsidev, DSI_TIMING1);
+ r = dsi_read_reg(dsi, DSI_TIMING1);
r = FLD_MOD(r, 1, 31, 31); /* TA_TO */
r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* TA_TO_X16 */
r = FLD_MOD(r, x8 ? 1 : 0, 29, 29); /* TA_TO_X8 */
r = FLD_MOD(r, ticks, 28, 16); /* TA_TO_COUNTER */
- dsi_write_reg(dsidev, DSI_TIMING1, r);
+ dsi_write_reg(dsi, DSI_TIMING1, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x8 ? 8 : 1);
@@ -3366,8 +3284,8 @@ static void dsi_set_ta_timeout(struct platform_device *dsidev, unsigned ticks,
(total_ticks * 1000) / (fck / 1000 / 1000));
}
-static void dsi_set_stop_state_counter(struct platform_device *dsidev,
- unsigned ticks, bool x4, bool x16)
+static void dsi_set_stop_state_counter(struct dsi_data *dsi, unsigned int ticks,
+ bool x4, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
@@ -3376,14 +3294,14 @@ static void dsi_set_stop_state_counter(struct platform_device *dsidev,
BUG_ON(ticks > 0x1fff);
/* ticks in DSI_FCK */
- fck = dsi_fclk_rate(dsidev);
+ fck = dsi_fclk_rate(dsi);
- r = dsi_read_reg(dsidev, DSI_TIMING1);
+ r = dsi_read_reg(dsi, DSI_TIMING1);
r = FLD_MOD(r, 1, 15, 15); /* FORCE_TX_STOP_MODE_IO */
r = FLD_MOD(r, x16 ? 1 : 0, 14, 14); /* STOP_STATE_X16_IO */
r = FLD_MOD(r, x4 ? 1 : 0, 13, 13); /* STOP_STATE_X4_IO */
r = FLD_MOD(r, ticks, 12, 0); /* STOP_STATE_COUNTER_IO */
- dsi_write_reg(dsidev, DSI_TIMING1, r);
+ dsi_write_reg(dsi, DSI_TIMING1, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
@@ -3393,8 +3311,8 @@ static void dsi_set_stop_state_counter(struct platform_device *dsidev,
(total_ticks * 1000) / (fck / 1000 / 1000));
}
-static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
- unsigned ticks, bool x4, bool x16)
+static void dsi_set_hs_tx_timeout(struct dsi_data *dsi, unsigned int ticks,
+ bool x4, bool x16)
{
unsigned long fck;
unsigned long total_ticks;
@@ -3403,14 +3321,14 @@ static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
BUG_ON(ticks > 0x1fff);
/* ticks in TxByteClkHS */
- fck = dsi_get_txbyteclkhs(dsidev);
+ fck = dsi_get_txbyteclkhs(dsi);
- r = dsi_read_reg(dsidev, DSI_TIMING2);
+ r = dsi_read_reg(dsi, DSI_TIMING2);
r = FLD_MOD(r, 1, 31, 31); /* HS_TX_TO */
r = FLD_MOD(r, x16 ? 1 : 0, 30, 30); /* HS_TX_TO_X16 */
r = FLD_MOD(r, x4 ? 1 : 0, 29, 29); /* HS_TX_TO_X8 (4 really) */
r = FLD_MOD(r, ticks, 28, 16); /* HS_TX_TO_COUNTER */
- dsi_write_reg(dsidev, DSI_TIMING2, r);
+ dsi_write_reg(dsi, DSI_TIMING2, r);
total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);
@@ -3420,9 +3338,8 @@ static void dsi_set_hs_tx_timeout(struct platform_device *dsidev,
(total_ticks * 1000) / (fck / 1000 / 1000));
}
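/*
 * Worked example of the timeout arithmetic shared by the four helpers
 * above (sketch; the 38.4 MHz functional clock is assumed for
 * illustration): ticks = 0x1fff with both x16 and x4 set gives
 * 0x1fff * 16 * 4 = 524224 clock ticks, i.e. roughly 14 ms at 38.4 MHz,
 * which is what the DSSDBG lines report.
 */
static unsigned long timeout_ns(unsigned int ticks, bool x4, bool x16,
				unsigned long fck)
{
	unsigned long total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);

	/* same integer math as the DSSDBG statements above */
	return total_ticks * 1000 / (fck / 1000 / 1000);
}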
-static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
+static void dsi_config_vp_num_line_buffers(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int num_line_buffers;
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
@@ -3442,12 +3359,11 @@ static void dsi_config_vp_num_line_buffers(struct platform_device *dsidev)
}
/* LINE_BUFFER */
- REG_FLD_MOD(dsidev, DSI_CTRL, num_line_buffers, 13, 12);
+ REG_FLD_MOD(dsi, DSI_CTRL, num_line_buffers, 13, 12);
}
-static void dsi_config_vp_sync_events(struct platform_device *dsidev)
+static void dsi_config_vp_sync_events(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
bool sync_end;
u32 r;
@@ -3456,7 +3372,7 @@ static void dsi_config_vp_sync_events(struct platform_device *dsidev)
else
sync_end = false;
- r = dsi_read_reg(dsidev, DSI_CTRL);
+ r = dsi_read_reg(dsi, DSI_CTRL);
r = FLD_MOD(r, 1, 9, 9); /* VP_DE_POL */
r = FLD_MOD(r, 1, 10, 10); /* VP_HSYNC_POL */
r = FLD_MOD(r, 1, 11, 11); /* VP_VSYNC_POL */
@@ -3464,12 +3380,11 @@ static void dsi_config_vp_sync_events(struct platform_device *dsidev)
r = FLD_MOD(r, sync_end, 16, 16); /* VP_VSYNC_END */
r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */
r = FLD_MOD(r, sync_end, 18, 18); /* VP_HSYNC_END */
- dsi_write_reg(dsidev, DSI_CTRL, r);
+ dsi_write_reg(dsi, DSI_CTRL, r);
}
-static void dsi_config_blanking_modes(struct platform_device *dsidev)
+static void dsi_config_blanking_modes(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int blanking_mode = dsi->vm_timings.blanking_mode;
int hfp_blanking_mode = dsi->vm_timings.hfp_blanking_mode;
int hbp_blanking_mode = dsi->vm_timings.hbp_blanking_mode;
@@ -3480,12 +3395,12 @@ static void dsi_config_blanking_modes(struct platform_device *dsidev)
* 0 = TX FIFO packets sent or LPS in corresponding blanking periods
* 1 = Long blanking packets are sent in corresponding blanking periods
*/
- r = dsi_read_reg(dsidev, DSI_CTRL);
+ r = dsi_read_reg(dsi, DSI_CTRL);
r = FLD_MOD(r, blanking_mode, 20, 20); /* BLANKING_MODE */
r = FLD_MOD(r, hfp_blanking_mode, 21, 21); /* HFP_BLANKING */
r = FLD_MOD(r, hbp_blanking_mode, 22, 22); /* HBP_BLANKING */
r = FLD_MOD(r, hsa_blanking_mode, 23, 23); /* HSA_BLANKING */
- dsi_write_reg(dsidev, DSI_CTRL, r);
+ dsi_write_reg(dsi, DSI_CTRL, r);
}
/*
@@ -3550,9 +3465,8 @@ static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
return max(lp_inter, 0);
}
-static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
+static void dsi_config_cmd_mode_interleaving(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int blanking_mode;
int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
@@ -3569,33 +3483,33 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
int bl_interleave_hs = 0, bl_interleave_lp = 0;
u32 r;
- r = dsi_read_reg(dsidev, DSI_CTRL);
+ r = dsi_read_reg(dsi, DSI_CTRL);
blanking_mode = FLD_GET(r, 20, 20);
hfp_blanking_mode = FLD_GET(r, 21, 21);
hbp_blanking_mode = FLD_GET(r, 22, 22);
hsa_blanking_mode = FLD_GET(r, 23, 23);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING1);
hbp = FLD_GET(r, 11, 0);
hfp = FLD_GET(r, 23, 12);
hsa = FLD_GET(r, 31, 24);
- r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
+ r = dsi_read_reg(dsi, DSI_CLK_TIMING);
ddr_clk_post = FLD_GET(r, 7, 0);
ddr_clk_pre = FLD_GET(r, 15, 8);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING7);
exit_hs_mode_lat = FLD_GET(r, 15, 0);
enter_hs_mode_lat = FLD_GET(r, 31, 16);
- r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
+ r = dsi_read_reg(dsi, DSI_CLK_CTRL);
lp_clk_div = FLD_GET(r, 12, 0);
ddr_alwon = FLD_GET(r, 13, 13);
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG0);
ths_exit = FLD_GET(r, 7, 0);
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
tclk_trail = FLD_GET(r, 15, 8);
exiths_clk = ths_exit + tclk_trail;
@@ -3649,45 +3563,44 @@ static void dsi_config_cmd_mode_interleaving(struct platform_device *dsidev)
hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
bl_interleave_lp);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING4);
r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
- dsi_write_reg(dsidev, DSI_VM_TIMING4, r);
+ dsi_write_reg(dsi, DSI_VM_TIMING4, r);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING5);
r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
- dsi_write_reg(dsidev, DSI_VM_TIMING5, r);
+ dsi_write_reg(dsi, DSI_VM_TIMING5, r);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING6);
r = FLD_MOD(r, bl_interleave_hs, 31, 15);
r = FLD_MOD(r, bl_interleave_lp, 16, 0);
- dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
+ dsi_write_reg(dsi, DSI_VM_TIMING6, r);
}
-static int dsi_proto_config(struct platform_device *dsidev)
+static int dsi_proto_config(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u32 r;
int buswidth = 0;
- dsi_config_tx_fifo(dsidev, DSI_FIFO_SIZE_32,
+ dsi_config_tx_fifo(dsi, DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32);
- dsi_config_rx_fifo(dsidev, DSI_FIFO_SIZE_32,
+ dsi_config_rx_fifo(dsi, DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32,
DSI_FIFO_SIZE_32);
/* XXX what values for the timeouts? */
- dsi_set_stop_state_counter(dsidev, 0x1000, false, false);
- dsi_set_ta_timeout(dsidev, 0x1fff, true, true);
- dsi_set_lp_rx_timeout(dsidev, 0x1fff, true, true);
- dsi_set_hs_tx_timeout(dsidev, 0x1fff, true, true);
+ dsi_set_stop_state_counter(dsi, 0x1000, false, false);
+ dsi_set_ta_timeout(dsi, 0x1fff, true, true);
+ dsi_set_lp_rx_timeout(dsi, 0x1fff, true, true);
+ dsi_set_hs_tx_timeout(dsi, 0x1fff, true, true);
switch (dsi_get_pixel_size(dsi->pix_fmt)) {
case 16:
@@ -3704,7 +3617,7 @@ static int dsi_proto_config(struct platform_device *dsidev)
return -EINVAL;
}
- r = dsi_read_reg(dsidev, DSI_CTRL);
+ r = dsi_read_reg(dsi, DSI_CTRL);
r = FLD_MOD(r, 1, 1, 1); /* CS_RX_EN */
r = FLD_MOD(r, 1, 2, 2); /* ECC_RX_EN */
r = FLD_MOD(r, 1, 3, 3); /* TX_FIFO_ARBITRATION */
@@ -3719,56 +3632,55 @@ static int dsi_proto_config(struct platform_device *dsidev)
r = FLD_MOD(r, 0, 25, 25);
}
- dsi_write_reg(dsidev, DSI_CTRL, r);
+ dsi_write_reg(dsi, DSI_CTRL, r);
- dsi_config_vp_num_line_buffers(dsidev);
+ dsi_config_vp_num_line_buffers(dsi);
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- dsi_config_vp_sync_events(dsidev);
- dsi_config_blanking_modes(dsidev);
- dsi_config_cmd_mode_interleaving(dsidev);
+ dsi_config_vp_sync_events(dsi);
+ dsi_config_blanking_modes(dsi);
+ dsi_config_cmd_mode_interleaving(dsi);
}
- dsi_vc_initial_config(dsidev, 0);
- dsi_vc_initial_config(dsidev, 1);
- dsi_vc_initial_config(dsidev, 2);
- dsi_vc_initial_config(dsidev, 3);
+ dsi_vc_initial_config(dsi, 0);
+ dsi_vc_initial_config(dsi, 1);
+ dsi_vc_initial_config(dsi, 2);
+ dsi_vc_initial_config(dsi, 3);
return 0;
}
-static void dsi_proto_timings(struct platform_device *dsidev)
+static void dsi_proto_timings(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
- unsigned tclk_pre, tclk_post;
- unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
- unsigned ths_trail, ths_exit;
- unsigned ddr_clk_pre, ddr_clk_post;
- unsigned enter_hs_mode_lat, exit_hs_mode_lat;
- unsigned ths_eot;
+ unsigned int tlpx, tclk_zero, tclk_prepare, tclk_trail;
+ unsigned int tclk_pre, tclk_post;
+ unsigned int ths_prepare, ths_prepare_ths_zero, ths_zero;
+ unsigned int ths_trail, ths_exit;
+ unsigned int ddr_clk_pre, ddr_clk_post;
+ unsigned int enter_hs_mode_lat, exit_hs_mode_lat;
+ unsigned int ths_eot;
int ndl = dsi->num_lanes_used - 1;
u32 r;
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG0);
ths_prepare = FLD_GET(r, 31, 24);
ths_prepare_ths_zero = FLD_GET(r, 23, 16);
ths_zero = ths_prepare_ths_zero - ths_prepare;
ths_trail = FLD_GET(r, 15, 8);
ths_exit = FLD_GET(r, 7, 0);
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1);
tlpx = FLD_GET(r, 20, 16) * 2;
tclk_trail = FLD_GET(r, 15, 8);
tclk_zero = FLD_GET(r, 7, 0);
- r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
+ r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2);
tclk_prepare = FLD_GET(r, 7, 0);
/* min 8*UI */
tclk_pre = 20;
/* min 60ns + 52*UI */
- tclk_post = ns2ddr(dsidev, 60) + 26;
+ tclk_post = ns2ddr(dsi, 60) + 26;
ths_eot = DIV_ROUND_UP(4, ndl);
@@ -3779,10 +3691,10 @@ static void dsi_proto_timings(struct platform_device *dsidev)
BUG_ON(ddr_clk_pre == 0 || ddr_clk_pre > 255);
BUG_ON(ddr_clk_post == 0 || ddr_clk_post > 255);
- r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
+ r = dsi_read_reg(dsi, DSI_CLK_TIMING);
r = FLD_MOD(r, ddr_clk_pre, 15, 8);
r = FLD_MOD(r, ddr_clk_post, 7, 0);
- dsi_write_reg(dsidev, DSI_CLK_TIMING, r);
+ dsi_write_reg(dsi, DSI_CLK_TIMING, r);
DSSDBG("ddr_clk_pre %u, ddr_clk_post %u\n",
ddr_clk_pre,
@@ -3796,7 +3708,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
r = FLD_VAL(enter_hs_mode_lat, 31, 16) |
FLD_VAL(exit_hs_mode_lat, 15, 0);
- dsi_write_reg(dsidev, DSI_VM_TIMING7, r);
+ dsi_write_reg(dsi, DSI_VM_TIMING7, r);
DSSDBG("enter_hs_mode_lat %u, exit_hs_mode_lat %u\n",
enter_hs_mode_lat, exit_hs_mode_lat);
@@ -3830,31 +3742,30 @@ static void dsi_proto_timings(struct platform_device *dsidev)
DSSDBG("VBP: %d, VFP: %d, VSA: %d, VACT: %d lines\n", vbp, vfp,
vsa, vm->vactive);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING1);
r = FLD_MOD(r, hbp, 11, 0); /* HBP */
r = FLD_MOD(r, hfp, 23, 12); /* HFP */
r = FLD_MOD(r, hsync_end ? hsa : 0, 31, 24); /* HSA */
- dsi_write_reg(dsidev, DSI_VM_TIMING1, r);
+ dsi_write_reg(dsi, DSI_VM_TIMING1, r);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING2);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING2);
r = FLD_MOD(r, vbp, 7, 0); /* VBP */
r = FLD_MOD(r, vfp, 15, 8); /* VFP */
r = FLD_MOD(r, vsa, 23, 16); /* VSA */
r = FLD_MOD(r, window_sync, 27, 24); /* WINDOW_SYNC */
- dsi_write_reg(dsidev, DSI_VM_TIMING2, r);
+ dsi_write_reg(dsi, DSI_VM_TIMING2, r);
- r = dsi_read_reg(dsidev, DSI_VM_TIMING3);
+ r = dsi_read_reg(dsi, DSI_VM_TIMING3);
r = FLD_MOD(r, vm->vactive, 14, 0); /* VACT */
r = FLD_MOD(r, tl, 31, 16); /* TL */
- dsi_write_reg(dsidev, DSI_VM_TIMING3, r);
+ dsi_write_reg(dsi, DSI_VM_TIMING3, r);
}
}
static int dsi_configure_pins(struct omap_dss_device *dssdev,
const struct omap_dsi_pin_config *pin_cfg)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int num_pins;
const int *pins;
struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
@@ -3919,9 +3830,7 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev,
static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- enum omap_channel dispc_channel = dssdev->dispc_channel;
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
struct omap_dss_device *out = &dsi->output;
u8 data_type;
@@ -3933,7 +3842,7 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
return -ENODEV;
}
- r = dsi_display_init_dispc(dsidev, dispc_channel);
+ r = dsi_display_init_dispc(dsi);
if (r)
goto err_init_dispc;
@@ -3956,22 +3865,22 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
goto err_pix_fmt;
}
- dsi_if_enable(dsidev, false);
- dsi_vc_enable(dsidev, channel, false);
+ dsi_if_enable(dsi, false);
+ dsi_vc_enable(dsi, channel, false);
/* MODE, 1 = video mode */
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 4, 4);
word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8);
- dsi_vc_write_long_header(dsidev, channel, data_type,
+ dsi_vc_write_long_header(dsi, channel, data_type,
word_count, 0);
- dsi_vc_enable(dsidev, channel, true);
- dsi_if_enable(dsidev, true);
+ dsi_vc_enable(dsi, channel, true);
+ dsi_if_enable(dsi, true);
}
- r = dss_mgr_enable(dispc_channel);
+ r = dss_mgr_enable(&dsi->output);
if (r)
goto err_mgr_enable;
@@ -3979,57 +3888,53 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
err_mgr_enable:
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- dsi_if_enable(dsidev, false);
- dsi_vc_enable(dsidev, channel, false);
+ dsi_if_enable(dsi, false);
+ dsi_vc_enable(dsi, channel, false);
}
err_pix_fmt:
- dsi_display_uninit_dispc(dsidev, dispc_channel);
+ dsi_display_uninit_dispc(dsi);
err_init_dispc:
return r;
}
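/*
 * Worked example for the video-mode long-packet header set up above
 * (panel numbers assumed for illustration): with hactive = 800 and a
 * 24-bit pixel format, word_count = DIV_ROUND_UP(800 * 24, 8) = 2400,
 * so each video line is announced as a 2400-byte long packet.
 */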
static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- enum omap_channel dispc_channel = dssdev->dispc_channel;
+ struct dsi_data *dsi = to_dsi_data(dssdev);
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
- dsi_if_enable(dsidev, false);
- dsi_vc_enable(dsidev, channel, false);
+ dsi_if_enable(dsi, false);
+ dsi_vc_enable(dsi, channel, false);
/* MODE, 0 = command mode */
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 4, 4);
+ REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 0, 4, 4);
- dsi_vc_enable(dsidev, channel, true);
- dsi_if_enable(dsidev, true);
+ dsi_vc_enable(dsi, channel, true);
+ dsi_if_enable(dsi, true);
}
- dss_mgr_disable(dispc_channel);
+ dss_mgr_disable(&dsi->output);
- dsi_display_uninit_dispc(dsidev, dispc_channel);
+ dsi_display_uninit_dispc(dsi);
}
-static void dsi_update_screen_dispc(struct platform_device *dsidev)
+static void dsi_update_screen_dispc(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- enum omap_channel dispc_channel = dsi->output.dispc_channel;
- unsigned bytespp;
- unsigned bytespl;
- unsigned bytespf;
- unsigned total_len;
- unsigned packet_payload;
- unsigned packet_len;
+ unsigned int bytespp;
+ unsigned int bytespl;
+ unsigned int bytespf;
+ unsigned int total_len;
+ unsigned int packet_payload;
+ unsigned int packet_len;
u32 l;
int r;
const unsigned channel = dsi->update_channel;
- const unsigned line_buf_size = dsi->line_buffer_size;
+ const unsigned int line_buf_size = dsi->line_buffer_size;
u16 w = dsi->vm.hactive;
u16 h = dsi->vm.vactive;
DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
- dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP);
+ dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_VP);
bytespp = dsi_get_pixel_size(dsi->pix_fmt) / 8;
bytespl = w * bytespp;
@@ -4050,16 +3955,16 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
total_len += (bytespf % packet_payload) + 1;
l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
- dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
+ dsi_write_reg(dsi, DSI_VC_TE(channel), l);
- dsi_vc_write_long_header(dsidev, channel, MIPI_DSI_DCS_LONG_WRITE,
+ dsi_vc_write_long_header(dsi, channel, MIPI_DSI_DCS_LONG_WRITE,
packet_len, 0);
if (dsi->te_enabled)
l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
else
l = FLD_MOD(l, 1, 31, 31); /* TE_START */
- dsi_write_reg(dsidev, DSI_VC_TE(channel), l);
+ dsi_write_reg(dsi, DSI_VC_TE(channel), l);
/* We put SIDLEMODE to no-idle for the duration of the transfer,
* because DSS interrupts are not capable of waking up the CPU and the
@@ -4067,24 +3972,24 @@ static void dsi_update_screen_dispc(struct platform_device *dsidev)
* the same goes for any DSS interrupts, but for some reason I have not
* seen the problem anywhere else than here.
*/
- dispc_disable_sidle();
+ dispc_disable_sidle(dsi->dss->dispc);
- dsi_perf_mark_start(dsidev);
+ dsi_perf_mark_start(dsi);
r = schedule_delayed_work(&dsi->framedone_timeout_work,
msecs_to_jiffies(250));
BUG_ON(r == 0);
- dss_mgr_set_timings(dispc_channel, &dsi->vm);
+ dss_mgr_set_timings(&dsi->output, &dsi->vm);
- dss_mgr_start_update(dispc_channel);
+ dss_mgr_start_update(&dsi->output);
if (dsi->te_enabled) {
/* disable LP_RX_TO, so that we can receive TE. Time to wait
* for TE is longer than the timer allows */
- REG_FLD_MOD(dsidev, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
+ REG_FLD_MOD(dsi, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
- dsi_vc_send_bta(dsidev, channel);
+ dsi_vc_send_bta(dsi, channel);
#ifdef DSI_CATCH_MISSING_TE
mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250));
@@ -4099,22 +4004,20 @@ static void dsi_te_timeout(struct timer_list *unused)
}
#endif
-static void dsi_handle_framedone(struct platform_device *dsidev, int error)
+static void dsi_handle_framedone(struct dsi_data *dsi, int error)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
/* SIDLEMODE back to smart-idle */
- dispc_enable_sidle();
+ dispc_enable_sidle(dsi->dss->dispc);
if (dsi->te_enabled) {
/* enable LP_RX_TO again after the TE */
- REG_FLD_MOD(dsidev, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
+ REG_FLD_MOD(dsi, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
}
dsi->framedone_callback(error, dsi->framedone_data);
if (!error)
- dsi_perf_show(dsidev, "DISPC");
+ dsi_perf_show(dsi, "DISPC");
}
static void dsi_framedone_timeout_work_callback(struct work_struct *work)
@@ -4130,13 +4033,12 @@ static void dsi_framedone_timeout_work_callback(struct work_struct *work)
DSSERR("Framedone not received for 250ms!\n");
- dsi_handle_framedone(dsi->pdev, -ETIMEDOUT);
+ dsi_handle_framedone(dsi, -ETIMEDOUT);
}
static void dsi_framedone_irq_callback(void *data)
{
- struct platform_device *dsidev = (struct platform_device *) data;
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = data;
/* Note: We get FRAMEDONE when DISPC has finished sending pixels and
* turns itself off. However, DSI still has the pixels in its buffers,
@@ -4145,17 +4047,16 @@ static void dsi_framedone_irq_callback(void *data)
cancel_delayed_work(&dsi->framedone_timeout_work);
- dsi_handle_framedone(dsidev, 0);
+ dsi_handle_framedone(dsi, 0);
}
static int dsi_update(struct omap_dss_device *dssdev, int channel,
void (*callback)(int, void *), void *data)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
u16 dw, dh;
- dsi_perf_mark_setup(dsidev);
+ dsi_perf_mark_setup(dsi);
dsi->update_channel = channel;
@@ -4169,26 +4070,25 @@ static int dsi_update(struct omap_dss_device *dssdev, int channel,
dsi->update_bytes = dw * dh *
dsi_get_pixel_size(dsi->pix_fmt) / 8;
#endif
- dsi_update_screen_dispc(dsidev);
+ dsi_update_screen_dispc(dsi);
return 0;
}
/* Display funcs */
-static int dsi_configure_dispc_clocks(struct platform_device *dsidev)
+static int dsi_configure_dispc_clocks(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dispc_clock_info dispc_cinfo;
int r;
unsigned long fck;
- fck = dsi_get_pll_hsdiv_dispc_rate(dsidev);
+ fck = dsi_get_pll_hsdiv_dispc_rate(dsi);
dispc_cinfo.lck_div = dsi->user_dispc_cinfo.lck_div;
dispc_cinfo.pck_div = dsi->user_dispc_cinfo.pck_div;
- r = dispc_calc_clock_rates(fck, &dispc_cinfo);
+ r = dispc_calc_clock_rates(dsi->dss->dispc, fck, &dispc_cinfo);
if (r) {
DSSERR("Failed to calc dispc clocks\n");
return r;
@@ -4199,19 +4099,18 @@ static int dsi_configure_dispc_clocks(struct platform_device *dsidev)
return 0;
}
-static int dsi_display_init_dispc(struct platform_device *dsidev,
- enum omap_channel channel)
+static int dsi_display_init_dispc(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ enum omap_channel channel = dsi->output.dispc_channel;
int r;
- dss_select_lcd_clk_source(channel, dsi->module_id == 0 ?
+ dss_select_lcd_clk_source(dsi->dss, channel, dsi->module_id == 0 ?
DSS_CLK_SRC_PLL1_1 :
DSS_CLK_SRC_PLL2_1);
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
- r = dss_mgr_register_framedone_handler(channel,
- dsi_framedone_irq_callback, dsidev);
+ r = dss_mgr_register_framedone_handler(&dsi->output,
+ dsi_framedone_irq_callback, dsi);
if (r) {
DSSERR("can't register FRAMEDONE handler\n");
goto err;
@@ -4240,9 +4139,9 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
- dss_mgr_set_timings(channel, &dsi->vm);
+ dss_mgr_set_timings(&dsi->output, &dsi->vm);
- r = dsi_configure_dispc_clocks(dsidev);
+ r = dsi_configure_dispc_clocks(dsi);
if (r)
goto err1;
@@ -4251,33 +4150,31 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
dsi_get_pixel_size(dsi->pix_fmt);
dsi->mgr_config.lcden_sig_polarity = 0;
- dss_mgr_set_lcd_config(channel, &dsi->mgr_config);
+ dss_mgr_set_lcd_config(&dsi->output, &dsi->mgr_config);
return 0;
err1:
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
- dss_mgr_unregister_framedone_handler(channel,
- dsi_framedone_irq_callback, dsidev);
+ dss_mgr_unregister_framedone_handler(&dsi->output,
+ dsi_framedone_irq_callback, dsi);
err:
- dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK);
return r;
}
-static void dsi_display_uninit_dispc(struct platform_device *dsidev,
- enum omap_channel channel)
+static void dsi_display_uninit_dispc(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ enum omap_channel channel = dsi->output.dispc_channel;
if (dsi->mode == OMAP_DSS_DSI_CMD_MODE)
- dss_mgr_unregister_framedone_handler(channel,
- dsi_framedone_irq_callback, dsidev);
+ dss_mgr_unregister_framedone_handler(&dsi->output,
+ dsi_framedone_irq_callback, dsi);
- dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
+ dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK);
}
-static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
+static int dsi_configure_dsi_clocks(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dss_pll_clock_info cinfo;
int r;
@@ -4292,99 +4189,95 @@ static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
return 0;
}
-static int dsi_display_init_dsi(struct platform_device *dsidev)
+static int dsi_display_init_dsi(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
r = dss_pll_enable(&dsi->pll);
if (r)
goto err0;
- r = dsi_configure_dsi_clocks(dsidev);
+ r = dsi_configure_dsi_clocks(dsi);
if (r)
goto err1;
- dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ?
- DSS_CLK_SRC_PLL1_2 :
- DSS_CLK_SRC_PLL2_2);
+ dss_select_dsi_clk_source(dsi->dss, dsi->module_id,
+ dsi->module_id == 0 ?
+ DSS_CLK_SRC_PLL1_2 : DSS_CLK_SRC_PLL2_2);
DSSDBG("PLL OK\n");
- r = dsi_cio_init(dsidev);
+ r = dsi_cio_init(dsi);
if (r)
goto err2;
- _dsi_print_reset_status(dsidev);
+ _dsi_print_reset_status(dsi);
- dsi_proto_timings(dsidev);
- dsi_set_lp_clk_divisor(dsidev);
+ dsi_proto_timings(dsi);
+ dsi_set_lp_clk_divisor(dsi);
if (1)
- _dsi_print_reset_status(dsidev);
+ _dsi_print_reset_status(dsi);
- r = dsi_proto_config(dsidev);
+ r = dsi_proto_config(dsi);
if (r)
goto err3;
/* enable interface */
- dsi_vc_enable(dsidev, 0, 1);
- dsi_vc_enable(dsidev, 1, 1);
- dsi_vc_enable(dsidev, 2, 1);
- dsi_vc_enable(dsidev, 3, 1);
- dsi_if_enable(dsidev, 1);
- dsi_force_tx_stop_mode_io(dsidev);
+ dsi_vc_enable(dsi, 0, 1);
+ dsi_vc_enable(dsi, 1, 1);
+ dsi_vc_enable(dsi, 2, 1);
+ dsi_vc_enable(dsi, 3, 1);
+ dsi_if_enable(dsi, 1);
+ dsi_force_tx_stop_mode_io(dsi);
return 0;
err3:
- dsi_cio_uninit(dsidev);
+ dsi_cio_uninit(dsi);
err2:
- dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
+ dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
err1:
dss_pll_disable(&dsi->pll);
err0:
return r;
}
-static void dsi_display_uninit_dsi(struct platform_device *dsidev,
- bool disconnect_lanes, bool enter_ulps)
+static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
+ bool enter_ulps)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
if (enter_ulps && !dsi->ulps_enabled)
- dsi_enter_ulps(dsidev);
+ dsi_enter_ulps(dsi);
/* disable interface */
- dsi_if_enable(dsidev, 0);
- dsi_vc_enable(dsidev, 0, 0);
- dsi_vc_enable(dsidev, 1, 0);
- dsi_vc_enable(dsidev, 2, 0);
- dsi_vc_enable(dsidev, 3, 0);
+ dsi_if_enable(dsi, 0);
+ dsi_vc_enable(dsi, 0, 0);
+ dsi_vc_enable(dsi, 1, 0);
+ dsi_vc_enable(dsi, 2, 0);
+ dsi_vc_enable(dsi, 3, 0);
- dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
- dsi_cio_uninit(dsidev);
- dsi_pll_uninit(dsidev, disconnect_lanes);
+ dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
+ dsi_cio_uninit(dsi);
+ dsi_pll_uninit(dsi, disconnect_lanes);
}
static int dsi_display_enable(struct omap_dss_device *dssdev)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int r = 0;
DSSDBG("dsi_display_enable\n");
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
mutex_lock(&dsi->lock);
- r = dsi_runtime_get(dsidev);
+ r = dsi_runtime_get(dsi);
if (r)
goto err_get_dsi;
- _dsi_initialize_irq(dsidev);
+ _dsi_initialize_irq(dsi);
- r = dsi_display_init_dsi(dsidev);
+ r = dsi_display_init_dsi(dsi);
if (r)
goto err_init_dsi;
@@ -4393,7 +4286,7 @@ static int dsi_display_enable(struct omap_dss_device *dssdev)
return 0;
err_init_dsi:
- dsi_runtime_put(dsidev);
+ dsi_runtime_put(dsi);
err_get_dsi:
mutex_unlock(&dsi->lock);
DSSDBG("dsi_display_enable FAILED\n");
@@ -4403,31 +4296,29 @@ err_get_dsi:
static void dsi_display_disable(struct omap_dss_device *dssdev,
bool disconnect_lanes, bool enter_ulps)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
DSSDBG("dsi_display_disable\n");
- WARN_ON(!dsi_bus_is_locked(dsidev));
+ WARN_ON(!dsi_bus_is_locked(dsi));
mutex_lock(&dsi->lock);
- dsi_sync_vc(dsidev, 0);
- dsi_sync_vc(dsidev, 1);
- dsi_sync_vc(dsidev, 2);
- dsi_sync_vc(dsidev, 3);
+ dsi_sync_vc(dsi, 0);
+ dsi_sync_vc(dsi, 1);
+ dsi_sync_vc(dsi, 2);
+ dsi_sync_vc(dsi, 3);
- dsi_display_uninit_dsi(dsidev, disconnect_lanes, enter_ulps);
+ dsi_display_uninit_dsi(dsi, disconnect_lanes, enter_ulps);
- dsi_runtime_put(dsidev);
+ dsi_runtime_put(dsi);
mutex_unlock(&dsi->lock);
}
static int dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
dsi->te_enabled = enable;
return 0;
@@ -4548,15 +4439,16 @@ static bool dsi_cm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc;
ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc;
- return dispc_div_calc(dispc, ctx->req_pck_min, ctx->req_pck_max,
- dsi_cm_calc_dispc_cb, ctx);
+ return dispc_div_calc(ctx->dsi->dss->dispc, dispc,
+ ctx->req_pck_min, ctx->req_pck_max,
+ dsi_cm_calc_dispc_cb, ctx);
}
static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
unsigned long clkdco, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
- struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
+ struct dsi_data *dsi = ctx->dsi;
ctx->dsi_cinfo.n = n;
ctx->dsi_cinfo.m = m;
@@ -4592,7 +4484,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
txbyteclk = pck * bitspp / 8 / ndl;
memset(ctx, 0, sizeof(*ctx));
- ctx->dsidev = dsi->pdev;
+ ctx->dsi = dsi;
ctx->pll = &dsi->pll;
ctx->config = cfg;
ctx->req_pck_min = pck;
@@ -4609,7 +4501,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
+ struct dsi_data *dsi = ctx->dsi;
const struct omap_dss_dsi_config *cfg = ctx->config;
int bitspp = dsi_get_pixel_size(cfg->pixel_format);
int ndl = dsi->num_lanes_used - 1;
@@ -4848,15 +4740,16 @@ static bool dsi_vm_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
else
pck_max = ctx->req_pck_max;
- return dispc_div_calc(dispc, ctx->req_pck_min, pck_max,
- dsi_vm_calc_dispc_cb, ctx);
+ return dispc_div_calc(ctx->dsi->dss->dispc, dispc,
+ ctx->req_pck_min, pck_max,
+ dsi_vm_calc_dispc_cb, ctx);
}
static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
unsigned long clkdco, void *data)
{
struct dsi_clk_calc_ctx *ctx = data;
- struct dsi_data *dsi = dsi_get_dsidrv_data(ctx->dsidev);
+ struct dsi_data *dsi = ctx->dsi;
ctx->dsi_cinfo.n = n;
ctx->dsi_cinfo.m = m;
@@ -4883,7 +4776,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
clkin = clk_get_rate(dsi->pll.clkin);
memset(ctx, 0, sizeof(*ctx));
- ctx->dsidev = dsi->pdev;
+ ctx->dsi = dsi;
ctx->pll = &dsi->pll;
ctx->config = cfg;
@@ -4913,8 +4806,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
static int dsi_set_config(struct omap_dss_device *dssdev,
const struct omap_dss_dsi_config *config)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
struct dsi_clk_calc_ctx ctx;
bool ok;
int r;
@@ -5001,8 +4893,7 @@ static enum omap_channel dsi_get_channel(struct dsi_data *dsi)
static int dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int i;
for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
@@ -5019,8 +4910,7 @@ static int dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
static int dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
if (vc_id < 0 || vc_id > 3) {
DSSERR("VC ID out of range\n");
@@ -5045,8 +4935,7 @@ static int dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
static void dsi_release_vc(struct omap_dss_device *dssdev, int channel)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = to_dsi_data(dssdev);
if ((channel >= 0 && channel <= 3) &&
dsi->vc[channel].dssdev == dssdev) {
@@ -5056,12 +4945,11 @@ static void dsi_release_vc(struct omap_dss_device *dssdev, int channel)
}
-static int dsi_get_clocks(struct platform_device *dsidev)
+static int dsi_get_clocks(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct clk *clk;
- clk = devm_clk_get(&dsidev->dev, "fck");
+ clk = devm_clk_get(dsi->dev, "fck");
if (IS_ERR(clk)) {
DSSERR("can't get fck\n");
return PTR_ERR(clk);
@@ -5075,15 +4963,14 @@ static int dsi_get_clocks(struct platform_device *dsidev)
static int dsi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- enum omap_channel dispc_channel = dssdev->dispc_channel;
+ struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_regulator_init(dsidev);
+ r = dsi_regulator_init(dsi);
if (r)
return r;
- r = dss_mgr_connect(dispc_channel, dssdev);
+ r = dss_mgr_connect(&dsi->output, dssdev);
if (r)
return r;
@@ -5091,7 +4978,7 @@ static int dsi_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dssdev->name);
- dss_mgr_disconnect(dispc_channel, dssdev);
+ dss_mgr_disconnect(&dsi->output, dssdev);
return r;
}
@@ -5101,7 +4988,7 @@ static int dsi_connect(struct omap_dss_device *dssdev,
static void dsi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel dispc_channel = dssdev->dispc_channel;
+ struct dsi_data *dsi = to_dsi_data(dssdev);
WARN_ON(dst != dssdev->dst);
@@ -5110,7 +4997,7 @@ static void dsi_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- dss_mgr_disconnect(dispc_channel, dssdev);
+ dss_mgr_disconnect(&dsi->output, dssdev);
}
static const struct omapdss_dsi_ops dsi_ops = {
@@ -5152,12 +5039,11 @@ static const struct omapdss_dsi_ops dsi_ops = {
.set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
};
-static void dsi_init_output(struct platform_device *dsidev)
+static void dsi_init_output(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_dss_device *out = &dsi->output;
- out->dev = &dsidev->dev;
+ out->dev = dsi->dev;
out->id = dsi->module_id == 0 ?
OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
@@ -5170,18 +5056,16 @@ static void dsi_init_output(struct platform_device *dsidev)
omapdss_register_output(out);
}
-static void dsi_uninit_output(struct platform_device *dsidev)
+static void dsi_uninit_output(struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct omap_dss_device *out = &dsi->output;
omapdss_unregister_output(out);
}
-static int dsi_probe_of(struct platform_device *pdev)
+static int dsi_probe_of(struct dsi_data *dsi)
{
- struct device_node *node = pdev->dev.of_node;
- struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
+ struct device_node *node = dsi->dev->of_node;
struct property *prop;
u32 lane_arr[10];
int len, num_pins;
@@ -5195,7 +5079,7 @@ static int dsi_probe_of(struct platform_device *pdev)
prop = of_find_property(ep, "lanes", &len);
if (prop == NULL) {
- dev_err(&pdev->dev, "failed to find lane data\n");
+ dev_err(dsi->dev, "failed to find lane data\n");
r = -EINVAL;
goto err;
}
@@ -5204,14 +5088,14 @@ static int dsi_probe_of(struct platform_device *pdev)
if (num_pins < 4 || num_pins % 2 != 0 ||
num_pins > dsi->num_lanes_supported * 2) {
- dev_err(&pdev->dev, "bad number of lanes\n");
+ dev_err(dsi->dev, "bad number of lanes\n");
r = -EINVAL;
goto err;
}
r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins);
if (r) {
- dev_err(&pdev->dev, "failed to read lane data\n");
+ dev_err(dsi->dev, "failed to read lane data\n");
goto err;
}
@@ -5221,7 +5105,7 @@ static int dsi_probe_of(struct platform_device *pdev)
r = dsi_configure_pins(&dsi->output, &pin_cfg);
if (r) {
- dev_err(&pdev->dev, "failed to configure pins");
+ dev_err(dsi->dev, "failed to configure pins");
goto err;
}
@@ -5321,14 +5205,13 @@ static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
.has_refsel = true,
};
-static int dsi_init_pll_data(struct platform_device *dsidev)
+static int dsi_init_pll_data(struct dss_device *dss, struct dsi_data *dsi)
{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
struct dss_pll *pll = &dsi->pll;
struct clk *clk;
int r;
- clk = devm_clk_get(&dsidev->dev, "sys_clk");
+ clk = devm_clk_get(dsi->dev, "sys_clk");
if (IS_ERR(clk)) {
DSSERR("can't get sys_clk\n");
return PTR_ERR(clk);
@@ -5341,7 +5224,7 @@ static int dsi_init_pll_data(struct platform_device *dsidev)
pll->hw = dsi->data->pll_hw;
pll->ops = &dsi_pll_ops;
- r = dss_pll_register(pll);
+ r = dss_pll_register(dss, pll);
if (r)
return r;
@@ -5413,9 +5296,11 @@ static const struct soc_device_attribute dsi_soc_devices[] = {
{ .machine = "AM35*", .data = &dsi_of_data_omap34xx },
{ /* sentinel */ }
};
+
static int dsi_bind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *dsidev = to_platform_device(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dss_device *dss = dss_get_device(master);
const struct soc_device_attribute *soc;
const struct dsi_module_id_data *d;
u32 rev;
@@ -5424,12 +5309,13 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
struct resource *dsi_mem;
struct resource *res;
- dsi = devm_kzalloc(&dsidev->dev, sizeof(*dsi), GFP_KERNEL);
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
- dsi->pdev = dsidev;
- dev_set_drvdata(&dsidev->dev, dsi);
+ dsi->dss = dss;
+ dsi->dev = dev;
+ dev_set_drvdata(dev, dsi);
spin_lock_init(&dsi->irq_lock);
spin_lock_init(&dsi->errors_lock);
@@ -5450,29 +5336,29 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
#endif
- dsi_mem = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
- dsi->proto_base = devm_ioremap_resource(&dsidev->dev, dsi_mem);
+ dsi_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proto");
+ dsi->proto_base = devm_ioremap_resource(dev, dsi_mem);
if (IS_ERR(dsi->proto_base))
return PTR_ERR(dsi->proto_base);
- res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "phy");
- dsi->phy_base = devm_ioremap_resource(&dsidev->dev, res);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
+ dsi->phy_base = devm_ioremap_resource(dev, res);
if (IS_ERR(dsi->phy_base))
return PTR_ERR(dsi->phy_base);
- res = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "pll");
- dsi->pll_base = devm_ioremap_resource(&dsidev->dev, res);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
+ dsi->pll_base = devm_ioremap_resource(dev, res);
if (IS_ERR(dsi->pll_base))
return PTR_ERR(dsi->pll_base);
- dsi->irq = platform_get_irq(dsi->pdev, 0);
+ dsi->irq = platform_get_irq(pdev, 0);
if (dsi->irq < 0) {
DSSERR("platform_get_irq failed\n");
return -ENODEV;
}
- r = devm_request_irq(&dsidev->dev, dsi->irq, omap_dsi_irq_handler,
- IRQF_SHARED, dev_name(&dsidev->dev), dsi->pdev);
+ r = devm_request_irq(dev, dsi->irq, omap_dsi_irq_handler,
+ IRQF_SHARED, dev_name(dev), dsi);
if (r < 0) {
DSSERR("request_irq failed\n");
return r;
@@ -5520,83 +5406,92 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
dsi->vc[i].vc_id = 0;
}
- r = dsi_get_clocks(dsidev);
+ r = dsi_get_clocks(dsi);
if (r)
return r;
- dsi_init_pll_data(dsidev);
+ dsi_init_pll_data(dss, dsi);
- pm_runtime_enable(&dsidev->dev);
+ pm_runtime_enable(dev);
- r = dsi_runtime_get(dsidev);
+ r = dsi_runtime_get(dsi);
if (r)
goto err_runtime_get;
- rev = dsi_read_reg(dsidev, DSI_REVISION);
- dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
+ rev = dsi_read_reg(dsi, DSI_REVISION);
+ dev_dbg(dev, "OMAP DSI rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
/* DSI on OMAP3 doesn't have register DSI_GNQ, set number
* of data to 3 by default */
if (dsi->data->quirks & DSI_QUIRK_GNQ)
/* NB_DATA_LANES */
- dsi->num_lanes_supported = 1 + REG_GET(dsidev, DSI_GNQ, 11, 9);
+ dsi->num_lanes_supported = 1 + REG_GET(dsi, DSI_GNQ, 11, 9);
else
dsi->num_lanes_supported = 3;
- dsi->line_buffer_size = dsi_get_line_buf_size(dsidev);
+ dsi->line_buffer_size = dsi_get_line_buf_size(dsi);
- dsi_init_output(dsidev);
+ dsi_init_output(dsi);
- r = dsi_probe_of(dsidev);
+ r = dsi_probe_of(dsi);
if (r) {
DSSERR("Invalid DSI DT data\n");
goto err_probe_of;
}
- r = of_platform_populate(dsidev->dev.of_node, NULL, NULL, &dsidev->dev);
+ r = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (r)
DSSERR("Failed to populate DSI child devices: %d\n", r);
- dsi_runtime_put(dsidev);
+ dsi_runtime_put(dsi);
if (dsi->module_id == 0)
- dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs);
- else if (dsi->module_id == 1)
- dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs);
-
+ dsi->debugfs.regs = dss_debugfs_create_file(dss, "dsi1_regs",
+ dsi1_dump_regs,
+ &dsi);
+ else
+ dsi->debugfs.regs = dss_debugfs_create_file(dss, "dsi2_regs",
+ dsi2_dump_regs,
+ &dsi);
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
if (dsi->module_id == 0)
- dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs);
- else if (dsi->module_id == 1)
- dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs);
+ dsi->debugfs.irqs = dss_debugfs_create_file(dss, "dsi1_irqs",
+ dsi1_dump_irqs,
+ &dsi);
+ else
+ dsi->debugfs.irqs = dss_debugfs_create_file(dss, "dsi2_irqs",
+ dsi2_dump_irqs,
+ &dsi);
#endif
return 0;
err_probe_of:
- dsi_uninit_output(dsidev);
- dsi_runtime_put(dsidev);
+ dsi_uninit_output(dsi);
+ dsi_runtime_put(dsi);
err_runtime_get:
- pm_runtime_disable(&dsidev->dev);
+ pm_runtime_disable(dev);
return r;
}
static void dsi_unbind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *dsidev = to_platform_device(dev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ struct dsi_data *dsi = dev_get_drvdata(dev);
- of_platform_depopulate(&dsidev->dev);
+ dss_debugfs_remove_file(dsi->debugfs.irqs);
+ dss_debugfs_remove_file(dsi->debugfs.regs);
+
+ of_platform_depopulate(dev);
WARN_ON(dsi->scp_clk_refcount > 0);
dss_pll_unregister(&dsi->pll);
- dsi_uninit_output(dsidev);
+ dsi_uninit_output(dsi);
- pm_runtime_disable(&dsidev->dev);
+ pm_runtime_disable(dev);
if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
regulator_disable(dsi->vdds_dsi_reg);
@@ -5622,8 +5517,7 @@ static int dsi_remove(struct platform_device *pdev)
static int dsi_runtime_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
+ struct dsi_data *dsi = dev_get_drvdata(dev);
dsi->is_enabled = false;
/* ensure the irq handler sees the is_enabled value */
@@ -5631,18 +5525,17 @@ static int dsi_runtime_suspend(struct device *dev)
/* wait for current handler to finish before turning the DSI off */
synchronize_irq(dsi->irq);
- dispc_runtime_put();
+ dispc_runtime_put(dsi->dss->dispc);
return 0;
}
static int dsi_runtime_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
+ struct dsi_data *dsi = dev_get_drvdata(dev);
int r;
- r = dispc_runtime_get();
+ r = dispc_runtime_get(dsi->dss->dispc);
if (r)
return r;
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index 967d9e1b34e5..4602a79c6c44 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -44,7 +44,6 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
return NULL;
}
-EXPORT_SYMBOL_GPL(dss_of_port_get_parent_device);
u32 dss_of_port_get_port_number(struct device_node *port)
{
@@ -57,7 +56,6 @@ u32 dss_of_port_get_port_number(struct device_node *port)
return reg;
}
-EXPORT_SYMBOL_GPL(dss_of_port_get_port_number);
struct omap_dss_device *
omapdss_of_find_source_for_first_ep(struct device_node *node)
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 04300b2da1b1..0b908e9de792 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -48,8 +48,6 @@
#include "omapdss.h"
#include "dss.h"
-#define DSS_SZ_REGS SZ_512
-
struct dss_reg {
u16 idx;
};
@@ -64,16 +62,19 @@ struct dss_reg {
#define DSS_PLL_CONTROL DSS_REG(0x0048)
#define DSS_SDI_STATUS DSS_REG(0x005C)
-#define REG_GET(idx, start, end) \
- FLD_GET(dss_read_reg(idx), start, end)
+#define REG_GET(dss, idx, start, end) \
+ FLD_GET(dss_read_reg(dss, idx), start, end)
-#define REG_FLD_MOD(idx, val, start, end) \
- dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
+#define REG_FLD_MOD(dss, idx, val, start, end) \
+ dss_write_reg(dss, idx, \
+ FLD_MOD(dss_read_reg(dss, idx), val, start, end))
struct dss_ops {
- int (*dpi_select_source)(int port, enum omap_channel channel);
- int (*select_lcd_source)(enum omap_channel channel,
- enum dss_clk_source clk_src);
+ int (*dpi_select_source)(struct dss_device *dss, int port,
+ enum omap_channel channel);
+ int (*select_lcd_source)(struct dss_device *dss,
+ enum omap_channel channel,
+ enum dss_clk_source clk_src);
};
struct dss_features {
@@ -90,33 +91,6 @@ struct dss_features {
bool has_lcd_clk_src;
};
-static struct {
- struct platform_device *pdev;
- void __iomem *base;
- struct regmap *syscon_pll_ctrl;
- u32 syscon_pll_ctrl_offset;
-
- struct clk *parent_clk;
- struct clk *dss_clk;
- unsigned long dss_clk_rate;
-
- unsigned long cache_req_pck;
- unsigned long cache_prate;
- struct dispc_clock_info cache_dispc_cinfo;
-
- enum dss_clk_source dsi_clk_source[MAX_NUM_DSI];
- enum dss_clk_source dispc_clk_source;
- enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
-
- bool ctx_valid;
- u32 ctx[DSS_SZ_REGS / sizeof(u32)];
-
- const struct dss_features *feat;
-
- struct dss_pll *video1_pll;
- struct dss_pll *video2_pll;
-} dss;
-
static const char * const dss_generic_clk_source_names[] = {
[DSS_CLK_SRC_FCK] = "FCK",
[DSS_CLK_SRC_PLL1_1] = "PLL1:1",
@@ -128,49 +102,50 @@ static const char * const dss_generic_clk_source_names[] = {
[DSS_CLK_SRC_HDMI_PLL] = "HDMI PLL",
};
-static inline void dss_write_reg(const struct dss_reg idx, u32 val)
+static inline void dss_write_reg(struct dss_device *dss,
+ const struct dss_reg idx, u32 val)
{
- __raw_writel(val, dss.base + idx.idx);
+ __raw_writel(val, dss->base + idx.idx);
}
-static inline u32 dss_read_reg(const struct dss_reg idx)
+static inline u32 dss_read_reg(struct dss_device *dss, const struct dss_reg idx)
{
- return __raw_readl(dss.base + idx.idx);
+ return __raw_readl(dss->base + idx.idx);
}
-#define SR(reg) \
- dss.ctx[(DSS_##reg).idx / sizeof(u32)] = dss_read_reg(DSS_##reg)
-#define RR(reg) \
- dss_write_reg(DSS_##reg, dss.ctx[(DSS_##reg).idx / sizeof(u32)])
+#define SR(dss, reg) \
+ dss->ctx[(DSS_##reg).idx / sizeof(u32)] = dss_read_reg(dss, DSS_##reg)
+#define RR(dss, reg) \
+ dss_write_reg(dss, DSS_##reg, dss->ctx[(DSS_##reg).idx / sizeof(u32)])
-static void dss_save_context(void)
+static void dss_save_context(struct dss_device *dss)
{
DSSDBG("dss_save_context\n");
- SR(CONTROL);
+ SR(dss, CONTROL);
- if (dss.feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
- SR(SDI_CONTROL);
- SR(PLL_CONTROL);
+ if (dss->feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
+ SR(dss, SDI_CONTROL);
+ SR(dss, PLL_CONTROL);
}
- dss.ctx_valid = true;
+ dss->ctx_valid = true;
DSSDBG("context saved\n");
}
-static void dss_restore_context(void)
+static void dss_restore_context(struct dss_device *dss)
{
DSSDBG("dss_restore_context\n");
- if (!dss.ctx_valid)
+ if (!dss->ctx_valid)
return;
- RR(CONTROL);
+ RR(dss, CONTROL);
- if (dss.feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
- RR(SDI_CONTROL);
- RR(PLL_CONTROL);
+ if (dss->feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
+ RR(dss, SDI_CONTROL);
+ RR(dss, PLL_CONTROL);
}
DSSDBG("context restored\n");
@@ -179,17 +154,17 @@ static void dss_restore_context(void)
#undef SR
#undef RR
-void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable)
+void dss_ctrl_pll_enable(struct dss_pll *pll, bool enable)
{
- unsigned shift;
- unsigned val;
+ unsigned int shift;
+ unsigned int val;
- if (!dss.syscon_pll_ctrl)
+ if (!pll->dss->syscon_pll_ctrl)
return;
val = !enable;
- switch (pll_id) {
+ switch (pll->id) {
case DSS_PLL_VIDEO1:
shift = 0;
break;
@@ -200,20 +175,22 @@ void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable)
shift = 2;
break;
default:
- DSSERR("illegal DSS PLL ID %d\n", pll_id);
+ DSSERR("illegal DSS PLL ID %d\n", pll->id);
return;
}
- regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset,
- 1 << shift, val << shift);
+ regmap_update_bits(pll->dss->syscon_pll_ctrl,
+ pll->dss->syscon_pll_ctrl_offset,
+ 1 << shift, val << shift);
}
-static int dss_ctrl_pll_set_control_mux(enum dss_clk_source clk_src,
- enum omap_channel channel)
+static int dss_ctrl_pll_set_control_mux(struct dss_device *dss,
+ enum dss_clk_source clk_src,
+ enum omap_channel channel)
{
- unsigned shift, val;
+ unsigned int shift, val;
- if (!dss.syscon_pll_ctrl)
+ if (!dss->syscon_pll_ctrl)
return -EINVAL;
switch (channel) {
@@ -268,47 +245,47 @@ static int dss_ctrl_pll_set_control_mux(enum dss_clk_source clk_src,
return -EINVAL;
}
- regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset,
+ regmap_update_bits(dss->syscon_pll_ctrl, dss->syscon_pll_ctrl_offset,
0x3 << shift, val << shift);
return 0;
}
-void dss_sdi_init(int datapairs)
+void dss_sdi_init(struct dss_device *dss, int datapairs)
{
u32 l;
BUG_ON(datapairs > 3 || datapairs < 1);
- l = dss_read_reg(DSS_SDI_CONTROL);
+ l = dss_read_reg(dss, DSS_SDI_CONTROL);
l = FLD_MOD(l, 0xf, 19, 15); /* SDI_PDIV */
l = FLD_MOD(l, datapairs-1, 3, 2); /* SDI_PRSEL */
l = FLD_MOD(l, 2, 1, 0); /* SDI_BWSEL */
- dss_write_reg(DSS_SDI_CONTROL, l);
+ dss_write_reg(dss, DSS_SDI_CONTROL, l);
- l = dss_read_reg(DSS_PLL_CONTROL);
+ l = dss_read_reg(dss, DSS_PLL_CONTROL);
l = FLD_MOD(l, 0x7, 25, 22); /* SDI_PLL_FREQSEL */
l = FLD_MOD(l, 0xb, 16, 11); /* SDI_PLL_REGN */
l = FLD_MOD(l, 0xb4, 10, 1); /* SDI_PLL_REGM */
- dss_write_reg(DSS_PLL_CONTROL, l);
+ dss_write_reg(dss, DSS_PLL_CONTROL, l);
}
-int dss_sdi_enable(void)
+int dss_sdi_enable(struct dss_device *dss)
{
unsigned long timeout;
- dispc_pck_free_enable(1);
+ dispc_pck_free_enable(dss->dispc, 1);
/* Reset SDI PLL */
- REG_FLD_MOD(DSS_PLL_CONTROL, 1, 18, 18); /* SDI_PLL_SYSRESET */
+ REG_FLD_MOD(dss, DSS_PLL_CONTROL, 1, 18, 18); /* SDI_PLL_SYSRESET */
udelay(1); /* wait 2x PCLK */
/* Lock SDI PLL */
- REG_FLD_MOD(DSS_PLL_CONTROL, 1, 28, 28); /* SDI_PLL_GOBIT */
+ REG_FLD_MOD(dss, DSS_PLL_CONTROL, 1, 28, 28); /* SDI_PLL_GOBIT */
/* Waiting for PLL lock request to complete */
timeout = jiffies + msecs_to_jiffies(500);
- while (dss_read_reg(DSS_SDI_STATUS) & (1 << 6)) {
+ while (dss_read_reg(dss, DSS_SDI_STATUS) & (1 << 6)) {
if (time_after_eq(jiffies, timeout)) {
DSSERR("PLL lock request timed out\n");
goto err1;
@@ -316,22 +293,22 @@ int dss_sdi_enable(void)
}
/* Clearing PLL_GO bit */
- REG_FLD_MOD(DSS_PLL_CONTROL, 0, 28, 28);
+ REG_FLD_MOD(dss, DSS_PLL_CONTROL, 0, 28, 28);
/* Waiting for PLL to lock */
timeout = jiffies + msecs_to_jiffies(500);
- while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 5))) {
+ while (!(dss_read_reg(dss, DSS_SDI_STATUS) & (1 << 5))) {
if (time_after_eq(jiffies, timeout)) {
DSSERR("PLL lock timed out\n");
goto err1;
}
}
- dispc_lcd_enable_signal(1);
+ dispc_lcd_enable_signal(dss->dispc, 1);
/* Waiting for SDI reset to complete */
timeout = jiffies + msecs_to_jiffies(500);
- while (!(dss_read_reg(DSS_SDI_STATUS) & (1 << 2))) {
+ while (!(dss_read_reg(dss, DSS_SDI_STATUS) & (1 << 2))) {
if (time_after_eq(jiffies, timeout)) {
DSSERR("SDI reset timed out\n");
goto err2;
@@ -341,24 +318,24 @@ int dss_sdi_enable(void)
return 0;
err2:
- dispc_lcd_enable_signal(0);
+ dispc_lcd_enable_signal(dss->dispc, 0);
err1:
/* Reset SDI PLL */
- REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
+ REG_FLD_MOD(dss, DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
- dispc_pck_free_enable(0);
+ dispc_pck_free_enable(dss->dispc, 0);
return -ETIMEDOUT;
}
-void dss_sdi_disable(void)
+void dss_sdi_disable(struct dss_device *dss)
{
- dispc_lcd_enable_signal(0);
+ dispc_lcd_enable_signal(dss->dispc, 0);
- dispc_pck_free_enable(0);
+ dispc_pck_free_enable(dss->dispc, 0);
/* Reset SDI PLL */
- REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
+ REG_FLD_MOD(dss, DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
}
const char *dss_get_clk_source_name(enum dss_clk_source clk_src)
@@ -366,48 +343,61 @@ const char *dss_get_clk_source_name(enum dss_clk_source clk_src)
return dss_generic_clk_source_names[clk_src];
}
-#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
-static void dss_dump_clocks(struct seq_file *s)
+static void dss_dump_clocks(struct dss_device *dss, struct seq_file *s)
{
const char *fclk_name;
unsigned long fclk_rate;
- if (dss_runtime_get())
+ if (dss_runtime_get(dss))
return;
seq_printf(s, "- DSS -\n");
fclk_name = dss_get_clk_source_name(DSS_CLK_SRC_FCK);
- fclk_rate = clk_get_rate(dss.dss_clk);
+ fclk_rate = clk_get_rate(dss->dss_clk);
seq_printf(s, "%s = %lu\n",
fclk_name,
fclk_rate);
- dss_runtime_put();
+ dss_runtime_put(dss);
}
-#endif
-static void dss_dump_regs(struct seq_file *s)
+static int dss_dump_regs(struct seq_file *s, void *p)
{
-#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
+ struct dss_device *dss = s->private;
- if (dss_runtime_get())
- return;
+#define DUMPREG(dss, r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(dss, r))
+
+ if (dss_runtime_get(dss))
+ return 0;
- DUMPREG(DSS_REVISION);
- DUMPREG(DSS_SYSCONFIG);
- DUMPREG(DSS_SYSSTATUS);
- DUMPREG(DSS_CONTROL);
+ DUMPREG(dss, DSS_REVISION);
+ DUMPREG(dss, DSS_SYSCONFIG);
+ DUMPREG(dss, DSS_SYSSTATUS);
+ DUMPREG(dss, DSS_CONTROL);
- if (dss.feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
- DUMPREG(DSS_SDI_CONTROL);
- DUMPREG(DSS_PLL_CONTROL);
- DUMPREG(DSS_SDI_STATUS);
+ if (dss->feat->outputs[OMAP_DSS_CHANNEL_LCD] & OMAP_DSS_OUTPUT_SDI) {
+ DUMPREG(dss, DSS_SDI_CONTROL);
+ DUMPREG(dss, DSS_PLL_CONTROL);
+ DUMPREG(dss, DSS_SDI_STATUS);
}
- dss_runtime_put();
+ dss_runtime_put(dss);
#undef DUMPREG
+ return 0;
+}
+
+static int dss_debug_dump_clocks(struct seq_file *s, void *p)
+{
+ struct dss_device *dss = s->private;
+
+ dss_dump_clocks(dss, s);
+ dispc_dump_clocks(dss->dispc, s);
+#ifdef CONFIG_OMAP2_DSS_DSI
+ dsi_dump_clocks(s);
+#endif
+ return 0;
}
static int dss_get_channel_index(enum omap_channel channel)
@@ -425,7 +415,8 @@ static int dss_get_channel_index(enum omap_channel channel)
}
}
-static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
+static void dss_select_dispc_clk_source(struct dss_device *dss,
+ enum dss_clk_source clk_src)
{
int b;
@@ -433,7 +424,7 @@ static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
* We always use PRCM clock as the DISPC func clock, except on DSS3,
* where we don't have separate DISPC and LCD clock sources.
*/
- if (WARN_ON(dss.feat->has_lcd_clk_src && clk_src != DSS_CLK_SRC_FCK))
+ if (WARN_ON(dss->feat->has_lcd_clk_src && clk_src != DSS_CLK_SRC_FCK))
return;
switch (clk_src) {
@@ -451,15 +442,15 @@ static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
return;
}
- REG_FLD_MOD(DSS_CONTROL, b, /* DISPC_CLK_SWITCH */
- dss.feat->dispc_clk_switch.start,
- dss.feat->dispc_clk_switch.end);
+ REG_FLD_MOD(dss, DSS_CONTROL, b, /* DISPC_CLK_SWITCH */
+ dss->feat->dispc_clk_switch.start,
+ dss->feat->dispc_clk_switch.end);
- dss.dispc_clk_source = clk_src;
+ dss->dispc_clk_source = clk_src;
}
-void dss_select_dsi_clk_source(int dsi_module,
- enum dss_clk_source clk_src)
+void dss_select_dsi_clk_source(struct dss_device *dss, int dsi_module,
+ enum dss_clk_source clk_src)
{
int b, pos;
@@ -481,13 +472,14 @@ void dss_select_dsi_clk_source(int dsi_module,
}
pos = dsi_module == 0 ? 1 : 10;
- REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* DSIx_CLK_SWITCH */
+ REG_FLD_MOD(dss, DSS_CONTROL, b, pos, pos); /* DSIx_CLK_SWITCH */
- dss.dsi_clk_source[dsi_module] = clk_src;
+ dss->dsi_clk_source[dsi_module] = clk_src;
}
-static int dss_lcd_clk_mux_dra7(enum omap_channel channel,
- enum dss_clk_source clk_src)
+static int dss_lcd_clk_mux_dra7(struct dss_device *dss,
+ enum omap_channel channel,
+ enum dss_clk_source clk_src)
{
const u8 ctrl_bits[] = {
[OMAP_DSS_CHANNEL_LCD] = 0,
@@ -500,21 +492,22 @@ static int dss_lcd_clk_mux_dra7(enum omap_channel channel,
if (clk_src == DSS_CLK_SRC_FCK) {
/* LCDx_CLK_SWITCH */
- REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
+ REG_FLD_MOD(dss, DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
return -EINVAL;
}
- r = dss_ctrl_pll_set_control_mux(clk_src, channel);
+ r = dss_ctrl_pll_set_control_mux(dss, clk_src, channel);
if (r)
return r;
- REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
+ REG_FLD_MOD(dss, DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
return 0;
}
-static int dss_lcd_clk_mux_omap5(enum omap_channel channel,
- enum dss_clk_source clk_src)
+static int dss_lcd_clk_mux_omap5(struct dss_device *dss,
+ enum omap_channel channel,
+ enum dss_clk_source clk_src)
{
const u8 ctrl_bits[] = {
[OMAP_DSS_CHANNEL_LCD] = 0,
@@ -531,20 +524,21 @@ static int dss_lcd_clk_mux_omap5(enum omap_channel channel,
if (clk_src == DSS_CLK_SRC_FCK) {
/* LCDx_CLK_SWITCH */
- REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
+ REG_FLD_MOD(dss, DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
return -EINVAL;
}
if (WARN_ON(allowed_plls[channel] != clk_src))
return -EINVAL;
- REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
+ REG_FLD_MOD(dss, DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
return 0;
}
-static int dss_lcd_clk_mux_omap4(enum omap_channel channel,
- enum dss_clk_source clk_src)
+static int dss_lcd_clk_mux_omap4(struct dss_device *dss,
+ enum omap_channel channel,
+ enum dss_clk_source clk_src)
{
const u8 ctrl_bits[] = {
[OMAP_DSS_CHANNEL_LCD] = 0,
@@ -559,87 +553,90 @@ static int dss_lcd_clk_mux_omap4(enum omap_channel channel,
if (clk_src == DSS_CLK_SRC_FCK) {
/* LCDx_CLK_SWITCH */
- REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
+ REG_FLD_MOD(dss, DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
return 0;
}
if (WARN_ON(allowed_plls[channel] != clk_src))
return -EINVAL;
- REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
+ REG_FLD_MOD(dss, DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
return 0;
}
-void dss_select_lcd_clk_source(enum omap_channel channel,
- enum dss_clk_source clk_src)
+void dss_select_lcd_clk_source(struct dss_device *dss,
+ enum omap_channel channel,
+ enum dss_clk_source clk_src)
{
int idx = dss_get_channel_index(channel);
int r;
- if (!dss.feat->has_lcd_clk_src) {
- dss_select_dispc_clk_source(clk_src);
- dss.lcd_clk_source[idx] = clk_src;
+ if (!dss->feat->has_lcd_clk_src) {
+ dss_select_dispc_clk_source(dss, clk_src);
+ dss->lcd_clk_source[idx] = clk_src;
return;
}
- r = dss.feat->ops->select_lcd_source(channel, clk_src);
+ r = dss->feat->ops->select_lcd_source(dss, channel, clk_src);
if (r)
return;
- dss.lcd_clk_source[idx] = clk_src;
+ dss->lcd_clk_source[idx] = clk_src;
}
-enum dss_clk_source dss_get_dispc_clk_source(void)
+enum dss_clk_source dss_get_dispc_clk_source(struct dss_device *dss)
{
- return dss.dispc_clk_source;
+ return dss->dispc_clk_source;
}
-enum dss_clk_source dss_get_dsi_clk_source(int dsi_module)
+enum dss_clk_source dss_get_dsi_clk_source(struct dss_device *dss,
+ int dsi_module)
{
- return dss.dsi_clk_source[dsi_module];
+ return dss->dsi_clk_source[dsi_module];
}
-enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
+enum dss_clk_source dss_get_lcd_clk_source(struct dss_device *dss,
+ enum omap_channel channel)
{
- if (dss.feat->has_lcd_clk_src) {
+ if (dss->feat->has_lcd_clk_src) {
int idx = dss_get_channel_index(channel);
- return dss.lcd_clk_source[idx];
+ return dss->lcd_clk_source[idx];
} else {
/* LCD_CLK source is the same as DISPC_FCLK source for
* OMAP2 and OMAP3 */
- return dss.dispc_clk_source;
+ return dss->dispc_clk_source;
}
}
-bool dss_div_calc(unsigned long pck, unsigned long fck_min,
- dss_div_calc_func func, void *data)
+bool dss_div_calc(struct dss_device *dss, unsigned long pck,
+ unsigned long fck_min, dss_div_calc_func func, void *data)
{
int fckd, fckd_start, fckd_stop;
unsigned long fck;
unsigned long fck_hw_max;
unsigned long fckd_hw_max;
unsigned long prate;
- unsigned m;
+ unsigned int m;
- fck_hw_max = dss.feat->fck_freq_max;
+ fck_hw_max = dss->feat->fck_freq_max;
- if (dss.parent_clk == NULL) {
- unsigned pckd;
+ if (dss->parent_clk == NULL) {
+ unsigned int pckd;
pckd = fck_hw_max / pck;
fck = pck * pckd;
- fck = clk_round_rate(dss.dss_clk, fck);
+ fck = clk_round_rate(dss->dss_clk, fck);
return func(fck, data);
}
- fckd_hw_max = dss.feat->fck_div_max;
+ fckd_hw_max = dss->feat->fck_div_max;
- m = dss.feat->dss_fck_multiplier;
- prate = clk_get_rate(dss.parent_clk);
+ m = dss->feat->dss_fck_multiplier;
+ prate = clk_get_rate(dss->parent_clk);
fck_min = fck_min ? fck_min : 1;
@@ -656,67 +653,68 @@ bool dss_div_calc(unsigned long pck, unsigned long fck_min,
return false;
}
-int dss_set_fck_rate(unsigned long rate)
+int dss_set_fck_rate(struct dss_device *dss, unsigned long rate)
{
int r;
DSSDBG("set fck to %lu\n", rate);
- r = clk_set_rate(dss.dss_clk, rate);
+ r = clk_set_rate(dss->dss_clk, rate);
if (r)
return r;
- dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
+ dss->dss_clk_rate = clk_get_rate(dss->dss_clk);
- WARN_ONCE(dss.dss_clk_rate != rate,
- "clk rate mismatch: %lu != %lu", dss.dss_clk_rate,
- rate);
+ WARN_ONCE(dss->dss_clk_rate != rate, "clk rate mismatch: %lu != %lu",
+ dss->dss_clk_rate, rate);
return 0;
}
-unsigned long dss_get_dispc_clk_rate(void)
+unsigned long dss_get_dispc_clk_rate(struct dss_device *dss)
{
- return dss.dss_clk_rate;
+ return dss->dss_clk_rate;
}
-unsigned long dss_get_max_fck_rate(void)
+unsigned long dss_get_max_fck_rate(struct dss_device *dss)
{
- return dss.feat->fck_freq_max;
+ return dss->feat->fck_freq_max;
}
-enum omap_dss_output_id dss_get_supported_outputs(enum omap_channel channel)
+enum omap_dss_output_id dss_get_supported_outputs(struct dss_device *dss,
+ enum omap_channel channel)
{
- return dss.feat->outputs[channel];
+ return dss->feat->outputs[channel];
}
-static int dss_setup_default_clock(void)
+static int dss_setup_default_clock(struct dss_device *dss)
{
unsigned long max_dss_fck, prate;
unsigned long fck;
- unsigned fck_div;
+ unsigned int fck_div;
int r;
- max_dss_fck = dss.feat->fck_freq_max;
+ max_dss_fck = dss->feat->fck_freq_max;
- if (dss.parent_clk == NULL) {
- fck = clk_round_rate(dss.dss_clk, max_dss_fck);
+ if (dss->parent_clk == NULL) {
+ fck = clk_round_rate(dss->dss_clk, max_dss_fck);
} else {
- prate = clk_get_rate(dss.parent_clk);
+ prate = clk_get_rate(dss->parent_clk);
- fck_div = DIV_ROUND_UP(prate * dss.feat->dss_fck_multiplier,
+ fck_div = DIV_ROUND_UP(prate * dss->feat->dss_fck_multiplier,
max_dss_fck);
- fck = DIV_ROUND_UP(prate, fck_div) * dss.feat->dss_fck_multiplier;
+ fck = DIV_ROUND_UP(prate, fck_div)
+ * dss->feat->dss_fck_multiplier;
}
- r = dss_set_fck_rate(fck);
+ r = dss_set_fck_rate(dss, fck);
if (r)
return r;
return 0;
}
-void dss_set_venc_output(enum omap_dss_venc_type type)
+void dss_set_venc_output(struct dss_device *dss, enum omap_dss_venc_type type)
{
int l = 0;
@@ -728,19 +726,21 @@ void dss_set_venc_output(enum omap_dss_venc_type type)
BUG();
/* venc out selection. 0 = comp, 1 = svideo */
- REG_FLD_MOD(DSS_CONTROL, l, 6, 6);
+ REG_FLD_MOD(dss, DSS_CONTROL, l, 6, 6);
}
-void dss_set_dac_pwrdn_bgz(bool enable)
+void dss_set_dac_pwrdn_bgz(struct dss_device *dss, bool enable)
{
- REG_FLD_MOD(DSS_CONTROL, enable, 5, 5); /* DAC Power-Down Control */
+ /* DAC Power-Down Control */
+ REG_FLD_MOD(dss, DSS_CONTROL, enable, 5, 5);
}
-void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select src)
+void dss_select_hdmi_venc_clk_source(struct dss_device *dss,
+ enum dss_hdmi_venc_clk_source_select src)
{
enum omap_dss_output_id outputs;
- outputs = dss.feat->outputs[OMAP_DSS_CHANNEL_DIGIT];
+ outputs = dss->feat->outputs[OMAP_DSS_CHANNEL_DIGIT];
/* Complain about invalid selections */
WARN_ON((src == DSS_VENC_TV_CLK) && !(outputs & OMAP_DSS_OUTPUT_VENC));
@@ -749,24 +749,12 @@ void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select src)
/* Select only if we have options */
if ((outputs & OMAP_DSS_OUTPUT_VENC) &&
(outputs & OMAP_DSS_OUTPUT_HDMI))
- REG_FLD_MOD(DSS_CONTROL, src, 15, 15); /* VENC_HDMI_SWITCH */
-}
-
-enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void)
-{
- enum omap_dss_output_id outputs;
-
- outputs = dss.feat->outputs[OMAP_DSS_CHANNEL_DIGIT];
- if ((outputs & OMAP_DSS_OUTPUT_HDMI) == 0)
- return DSS_VENC_TV_CLK;
-
- if ((outputs & OMAP_DSS_OUTPUT_VENC) == 0)
- return DSS_HDMI_M_PCLK;
-
- return REG_GET(DSS_CONTROL, 15, 15);
+ /* VENC_HDMI_SWITCH */
+ REG_FLD_MOD(dss, DSS_CONTROL, src, 15, 15);
}
-static int dss_dpi_select_source_omap2_omap3(int port, enum omap_channel channel)
+static int dss_dpi_select_source_omap2_omap3(struct dss_device *dss, int port,
+ enum omap_channel channel)
{
if (channel != OMAP_DSS_CHANNEL_LCD)
return -EINVAL;
@@ -774,7 +762,8 @@ static int dss_dpi_select_source_omap2_omap3(int port, enum omap_channel channel
return 0;
}
-static int dss_dpi_select_source_omap4(int port, enum omap_channel channel)
+static int dss_dpi_select_source_omap4(struct dss_device *dss, int port,
+ enum omap_channel channel)
{
int val;
@@ -789,12 +778,13 @@ static int dss_dpi_select_source_omap4(int port, enum omap_channel channel)
return -EINVAL;
}
- REG_FLD_MOD(DSS_CONTROL, val, 17, 17);
+ REG_FLD_MOD(dss, DSS_CONTROL, val, 17, 17);
return 0;
}
-static int dss_dpi_select_source_omap5(int port, enum omap_channel channel)
+static int dss_dpi_select_source_omap5(struct dss_device *dss, int port,
+ enum omap_channel channel)
{
int val;
@@ -815,16 +805,17 @@ static int dss_dpi_select_source_omap5(int port, enum omap_channel channel)
return -EINVAL;
}
- REG_FLD_MOD(DSS_CONTROL, val, 17, 16);
+ REG_FLD_MOD(dss, DSS_CONTROL, val, 17, 16);
return 0;
}
-static int dss_dpi_select_source_dra7xx(int port, enum omap_channel channel)
+static int dss_dpi_select_source_dra7xx(struct dss_device *dss, int port,
+ enum omap_channel channel)
{
switch (port) {
case 0:
- return dss_dpi_select_source_omap5(port, channel);
+ return dss_dpi_select_source_omap5(dss, port, channel);
case 1:
if (channel != OMAP_DSS_CHANNEL_LCD2)
return -EINVAL;
@@ -840,135 +831,153 @@ static int dss_dpi_select_source_dra7xx(int port, enum omap_channel channel)
return 0;
}
-int dss_dpi_select_source(int port, enum omap_channel channel)
+int dss_dpi_select_source(struct dss_device *dss, int port,
+ enum omap_channel channel)
{
- return dss.feat->ops->dpi_select_source(port, channel);
+ return dss->feat->ops->dpi_select_source(dss, port, channel);
}
-static int dss_get_clocks(void)
+static int dss_get_clocks(struct dss_device *dss)
{
struct clk *clk;
- clk = devm_clk_get(&dss.pdev->dev, "fck");
+ clk = devm_clk_get(&dss->pdev->dev, "fck");
if (IS_ERR(clk)) {
DSSERR("can't get clock fck\n");
return PTR_ERR(clk);
}
- dss.dss_clk = clk;
+ dss->dss_clk = clk;
- if (dss.feat->parent_clk_name) {
- clk = clk_get(NULL, dss.feat->parent_clk_name);
+ if (dss->feat->parent_clk_name) {
+ clk = clk_get(NULL, dss->feat->parent_clk_name);
if (IS_ERR(clk)) {
- DSSERR("Failed to get %s\n", dss.feat->parent_clk_name);
+ DSSERR("Failed to get %s\n",
+ dss->feat->parent_clk_name);
return PTR_ERR(clk);
}
} else {
clk = NULL;
}
- dss.parent_clk = clk;
+ dss->parent_clk = clk;
return 0;
}
-static void dss_put_clocks(void)
+static void dss_put_clocks(struct dss_device *dss)
{
- if (dss.parent_clk)
- clk_put(dss.parent_clk);
+ if (dss->parent_clk)
+ clk_put(dss->parent_clk);
}
-int dss_runtime_get(void)
+int dss_runtime_get(struct dss_device *dss)
{
int r;
DSSDBG("dss_runtime_get\n");
- r = pm_runtime_get_sync(&dss.pdev->dev);
+ r = pm_runtime_get_sync(&dss->pdev->dev);
WARN_ON(r < 0);
return r < 0 ? r : 0;
}
-void dss_runtime_put(void)
+void dss_runtime_put(struct dss_device *dss)
{
int r;
DSSDBG("dss_runtime_put\n");
- r = pm_runtime_put_sync(&dss.pdev->dev);
+ r = pm_runtime_put_sync(&dss->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS && r != -EBUSY);
}
-/* DEBUGFS */
-#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
-static void dss_debug_dump_clocks(struct seq_file *s)
+struct dss_device *dss_get_device(struct device *dev)
{
- dss_dump_clocks(s);
- dispc_dump_clocks(s);
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_dump_clocks(s);
-#endif
+ return dev_get_drvdata(dev);
}
-static int dss_debug_show(struct seq_file *s, void *unused)
+/* DEBUGFS */
+#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
+static int dss_initialize_debugfs(struct dss_device *dss)
{
- void (*func)(struct seq_file *) = s->private;
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("omapdss", NULL);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ dss->debugfs.root = dir;
- func(s);
return 0;
}
+static void dss_uninitialize_debugfs(struct dss_device *dss)
+{
+ debugfs_remove_recursive(dss->debugfs.root);
+}
+
+struct dss_debugfs_entry {
+ struct dentry *dentry;
+ int (*show_fn)(struct seq_file *s, void *data);
+ void *data;
+};
+
static int dss_debug_open(struct inode *inode, struct file *file)
{
- return single_open(file, dss_debug_show, inode->i_private);
+ struct dss_debugfs_entry *entry = inode->i_private;
+
+ return single_open(file, entry->show_fn, entry->data);
}
static const struct file_operations dss_debug_fops = {
- .open = dss_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
+ .open = dss_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
-static struct dentry *dss_debugfs_dir;
-
-static int dss_initialize_debugfs(void)
+struct dss_debugfs_entry *
+dss_debugfs_create_file(struct dss_device *dss, const char *name,
+ int (*show_fn)(struct seq_file *s, void *data),
+ void *data)
{
- dss_debugfs_dir = debugfs_create_dir("omapdss", NULL);
- if (IS_ERR(dss_debugfs_dir)) {
- int err = PTR_ERR(dss_debugfs_dir);
+ struct dss_debugfs_entry *entry;
+ struct dentry *d;
- dss_debugfs_dir = NULL;
- return err;
- }
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return ERR_PTR(-ENOMEM);
- debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
- &dss_debug_dump_clocks, &dss_debug_fops);
+ entry->show_fn = show_fn;
+ entry->data = data;
- return 0;
-}
+ d = debugfs_create_file(name, 0444, dss->debugfs.root, entry,
+ &dss_debug_fops);
+ if (IS_ERR(d)) {
+ kfree(entry);
+ return ERR_PTR(PTR_ERR(d));
+ }
-static void dss_uninitialize_debugfs(void)
-{
- if (dss_debugfs_dir)
- debugfs_remove_recursive(dss_debugfs_dir);
+ entry->dentry = d;
+ return entry;
}
-int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
+void dss_debugfs_remove_file(struct dss_debugfs_entry *entry)
{
- struct dentry *d;
-
- d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
- write, &dss_debug_fops);
+ if (IS_ERR_OR_NULL(entry))
+ return;
- return PTR_ERR_OR_ZERO(d);
+ debugfs_remove(entry->dentry);
+ kfree(entry);
}
+
#else /* CONFIG_OMAP2_DSS_DEBUGFS */
-static inline int dss_initialize_debugfs(void)
+static inline int dss_initialize_debugfs(struct dss_device *dss)
{
return 0;
}
-static inline void dss_uninitialize_debugfs(void)
+static inline void dss_uninitialize_debugfs(struct dss_device *dss)
{
}
#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
@@ -1169,23 +1178,24 @@ static const struct dss_features dra7xx_dss_feats = {
.has_lcd_clk_src = true,
};
-static int dss_init_ports(struct platform_device *pdev)
+static int dss_init_ports(struct dss_device *dss)
{
+ struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
int i;
- for (i = 0; i < dss.feat->num_ports; i++) {
+ for (i = 0; i < dss->feat->num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
if (!port)
continue;
- switch (dss.feat->ports[i]) {
+ switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
- dpi_init_port(pdev, port, dss.feat->model);
+ dpi_init_port(dss, pdev, port, dss->feat->model);
break;
case OMAP_DISPLAY_TYPE_SDI:
- sdi_init_port(pdev, port);
+ sdi_init_port(dss, pdev, port);
break;
default:
break;
@@ -1195,18 +1205,19 @@ static int dss_init_ports(struct platform_device *pdev)
return 0;
}
-static void dss_uninit_ports(struct platform_device *pdev)
+static void dss_uninit_ports(struct dss_device *dss)
{
+ struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
int i;
- for (i = 0; i < dss.feat->num_ports; i++) {
+ for (i = 0; i < dss->feat->num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
if (!port)
continue;
- switch (dss.feat->ports[i]) {
+ switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
dpi_uninit_port(port);
break;
@@ -1219,8 +1230,9 @@ static void dss_uninit_ports(struct platform_device *pdev)
}
}
-static int dss_video_pll_probe(struct platform_device *pdev)
+static int dss_video_pll_probe(struct dss_device *dss)
{
+ struct platform_device *pdev = dss->pdev;
struct device_node *np = pdev->dev.of_node;
struct regulator *pll_regulator;
int r;
@@ -1229,16 +1241,16 @@ static int dss_video_pll_probe(struct platform_device *pdev)
return 0;
if (of_property_read_bool(np, "syscon-pll-ctrl")) {
- dss.syscon_pll_ctrl = syscon_regmap_lookup_by_phandle(np,
+ dss->syscon_pll_ctrl = syscon_regmap_lookup_by_phandle(np,
"syscon-pll-ctrl");
- if (IS_ERR(dss.syscon_pll_ctrl)) {
+ if (IS_ERR(dss->syscon_pll_ctrl)) {
dev_err(&pdev->dev,
"failed to get syscon-pll-ctrl regmap\n");
- return PTR_ERR(dss.syscon_pll_ctrl);
+ return PTR_ERR(dss->syscon_pll_ctrl);
}
if (of_property_read_u32_index(np, "syscon-pll-ctrl", 1,
- &dss.syscon_pll_ctrl_offset)) {
+ &dss->syscon_pll_ctrl_offset)) {
dev_err(&pdev->dev,
"failed to get syscon-pll-ctrl offset\n");
return -EINVAL;
@@ -1264,16 +1276,18 @@ static int dss_video_pll_probe(struct platform_device *pdev)
}
if (of_property_match_string(np, "reg-names", "pll1") >= 0) {
- dss.video1_pll = dss_video_pll_init(pdev, 0, pll_regulator);
- if (IS_ERR(dss.video1_pll))
- return PTR_ERR(dss.video1_pll);
+ dss->video1_pll = dss_video_pll_init(dss, pdev, 0,
+ pll_regulator);
+ if (IS_ERR(dss->video1_pll))
+ return PTR_ERR(dss->video1_pll);
}
if (of_property_match_string(np, "reg-names", "pll2") >= 0) {
- dss.video2_pll = dss_video_pll_init(pdev, 1, pll_regulator);
- if (IS_ERR(dss.video2_pll)) {
- dss_video_pll_uninit(dss.video1_pll);
- return PTR_ERR(dss.video2_pll);
+ dss->video2_pll = dss_video_pll_init(dss, pdev, 1,
+ pll_regulator);
+ if (IS_ERR(dss->video2_pll)) {
+ dss_video_pll_uninit(dss->video1_pll);
+ return PTR_ERR(dss->video2_pll);
}
}
@@ -1300,109 +1314,26 @@ static const struct soc_device_attribute dss_soc_devices[] = {
static int dss_bind(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct resource *dss_mem;
- u32 rev;
+ struct dss_device *dss = dev_get_drvdata(dev);
int r;
- dss_mem = platform_get_resource(dss.pdev, IORESOURCE_MEM, 0);
- dss.base = devm_ioremap_resource(&pdev->dev, dss_mem);
- if (IS_ERR(dss.base))
- return PTR_ERR(dss.base);
-
- r = dss_get_clocks();
+ r = component_bind_all(dev, NULL);
if (r)
return r;
- r = dss_setup_default_clock();
- if (r)
- goto err_setup_clocks;
-
- r = dss_video_pll_probe(pdev);
- if (r)
- goto err_pll_init;
-
- r = dss_init_ports(pdev);
- if (r)
- goto err_init_ports;
-
- pm_runtime_enable(&pdev->dev);
-
- r = dss_runtime_get();
- if (r)
- goto err_runtime_get;
-
- dss.dss_clk_rate = clk_get_rate(dss.dss_clk);
-
- /* Select DPLL */
- REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
-
- dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
-
-#ifdef CONFIG_OMAP2_DSS_VENC
- REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
- REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
- REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
-#endif
- dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK;
- dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK;
- dss.dispc_clk_source = DSS_CLK_SRC_FCK;
- dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
- dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
-
- rev = dss_read_reg(DSS_REVISION);
- pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
-
- dss_runtime_put();
-
- r = component_bind_all(&pdev->dev, NULL);
- if (r)
- goto err_component;
-
- dss_debugfs_create_file("dss", dss_dump_regs);
-
pm_set_vt_switch(0);
omapdss_gather_components(dev);
- omapdss_set_is_initialized(true);
+ omapdss_set_dss(dss);
return 0;
-
-err_component:
-err_runtime_get:
- pm_runtime_disable(&pdev->dev);
- dss_uninit_ports(pdev);
-err_init_ports:
- if (dss.video1_pll)
- dss_video_pll_uninit(dss.video1_pll);
-
- if (dss.video2_pll)
- dss_video_pll_uninit(dss.video2_pll);
-err_pll_init:
-err_setup_clocks:
- dss_put_clocks();
- return r;
}
static void dss_unbind(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
-
- omapdss_set_is_initialized(false);
+ omapdss_set_dss(NULL);
- component_unbind_all(&pdev->dev, NULL);
-
- if (dss.video1_pll)
- dss_video_pll_uninit(dss.video1_pll);
-
- if (dss.video2_pll)
- dss_video_pll_uninit(dss.video2_pll);
-
- dss_uninit_ports(pdev);
-
- pm_runtime_disable(&pdev->dev);
-
- dss_put_clocks();
+ component_unbind_all(dev, NULL);
}
static const struct component_master_ops dss_component_ops = {
@@ -1434,18 +1365,60 @@ static int dss_add_child_component(struct device *dev, void *data)
return 0;
}
+static int dss_probe_hardware(struct dss_device *dss)
+{
+ u32 rev;
+ int r;
+
+ r = dss_runtime_get(dss);
+ if (r)
+ return r;
+
+ dss->dss_clk_rate = clk_get_rate(dss->dss_clk);
+
+ /* Select DPLL */
+ REG_FLD_MOD(dss, DSS_CONTROL, 0, 0, 0);
+
+ dss_select_dispc_clk_source(dss, DSS_CLK_SRC_FCK);
+
+#ifdef CONFIG_OMAP2_DSS_VENC
+ REG_FLD_MOD(dss, DSS_CONTROL, 1, 4, 4); /* venc dac demen */
+ REG_FLD_MOD(dss, DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
+ REG_FLD_MOD(dss, DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
+#endif
+ dss->dsi_clk_source[0] = DSS_CLK_SRC_FCK;
+ dss->dsi_clk_source[1] = DSS_CLK_SRC_FCK;
+ dss->dispc_clk_source = DSS_CLK_SRC_FCK;
+ dss->lcd_clk_source[0] = DSS_CLK_SRC_FCK;
+ dss->lcd_clk_source[1] = DSS_CLK_SRC_FCK;
+
+ rev = dss_read_reg(dss, DSS_REVISION);
+ pr_info("OMAP DSS rev %d.%d\n", FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ dss_runtime_put(dss);
+
+ return 0;
+}
+
static int dss_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *soc;
struct component_match *match = NULL;
+ struct resource *dss_mem;
+ struct dss_device *dss;
int r;
- dss.pdev = pdev;
+ dss = kzalloc(sizeof(*dss), GFP_KERNEL);
+ if (!dss)
+ return -ENOMEM;
+
+ dss->pdev = pdev;
+ platform_set_drvdata(pdev, dss);
r = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (r) {
dev_err(&pdev->dev, "Failed to set the DMA mask\n");
- return r;
+ goto err_free_dss;
}
/*
@@ -1454,31 +1427,108 @@ static int dss_probe(struct platform_device *pdev)
*/
soc = soc_device_match(dss_soc_devices);
if (soc)
- dss.feat = soc->data;
+ dss->feat = soc->data;
else
- dss.feat = of_match_device(dss_of_match, &pdev->dev)->data;
+ dss->feat = of_match_device(dss_of_match, &pdev->dev)->data;
+
+ /* Map I/O registers, get and setup clocks. */
+ dss_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dss->base = devm_ioremap_resource(&pdev->dev, dss_mem);
+ if (IS_ERR(dss->base)) {
+ r = PTR_ERR(dss->base);
+ goto err_free_dss;
+ }
- r = dss_initialize_debugfs();
+ r = dss_get_clocks(dss);
if (r)
- return r;
+ goto err_free_dss;
+
+ r = dss_setup_default_clock(dss);
+ if (r)
+ goto err_put_clocks;
- /* add all the child devices as components */
+ /* Setup the video PLLs and the DPI and SDI ports. */
+ r = dss_video_pll_probe(dss);
+ if (r)
+ goto err_put_clocks;
+
+ r = dss_init_ports(dss);
+ if (r)
+ goto err_uninit_plls;
+
+ /* Enable runtime PM and probe the hardware. */
+ pm_runtime_enable(&pdev->dev);
+
+ r = dss_probe_hardware(dss);
+ if (r)
+ goto err_pm_runtime_disable;
+
+ /* Initialize debugfs. */
+ r = dss_initialize_debugfs(dss);
+ if (r)
+ goto err_pm_runtime_disable;
+
+ dss->debugfs.clk = dss_debugfs_create_file(dss, "clk",
+ dss_debug_dump_clocks, dss);
+ dss->debugfs.dss = dss_debugfs_create_file(dss, "dss", dss_dump_regs,
+ dss);
+
+ /* Add all the child devices as components. */
device_for_each_child(&pdev->dev, &match, dss_add_child_component);
r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
- if (r) {
- dss_uninitialize_debugfs();
- return r;
- }
+ if (r)
+ goto err_uninit_debugfs;
return 0;
+
+err_uninit_debugfs:
+ dss_debugfs_remove_file(dss->debugfs.clk);
+ dss_debugfs_remove_file(dss->debugfs.dss);
+ dss_uninitialize_debugfs(dss);
+
+err_pm_runtime_disable:
+ pm_runtime_disable(&pdev->dev);
+ dss_uninit_ports(dss);
+
+err_uninit_plls:
+ if (dss->video1_pll)
+ dss_video_pll_uninit(dss->video1_pll);
+ if (dss->video2_pll)
+ dss_video_pll_uninit(dss->video2_pll);
+
+err_put_clocks:
+ dss_put_clocks(dss);
+
+err_free_dss:
+ kfree(dss);
+
+ return r;
}
static int dss_remove(struct platform_device *pdev)
{
+ struct dss_device *dss = platform_get_drvdata(pdev);
+
component_master_del(&pdev->dev, &dss_component_ops);
- dss_uninitialize_debugfs();
+ dss_debugfs_remove_file(dss->debugfs.clk);
+ dss_debugfs_remove_file(dss->debugfs.dss);
+ dss_uninitialize_debugfs(dss);
+
+ pm_runtime_disable(&pdev->dev);
+
+ dss_uninit_ports(dss);
+
+ if (dss->video1_pll)
+ dss_video_pll_uninit(dss->video1_pll);
+
+ if (dss->video2_pll)
+ dss_video_pll_uninit(dss->video2_pll);
+
+ dss_put_clocks(dss);
+
+ kfree(dss);
return 0;
}
@@ -1500,7 +1550,9 @@ static void dss_shutdown(struct platform_device *pdev)
static int dss_runtime_suspend(struct device *dev)
{
- dss_save_context();
+ struct dss_device *dss = dev_get_drvdata(dev);
+
+ dss_save_context(dss);
dss_set_min_bus_tput(dev, 0);
pinctrl_pm_select_sleep_state(dev);
@@ -1510,6 +1562,7 @@ static int dss_runtime_suspend(struct device *dev)
static int dss_runtime_resume(struct device *dev)
{
+ struct dss_device *dss = dev_get_drvdata(dev);
int r;
pinctrl_pm_select_default_state(dev);
@@ -1525,7 +1578,7 @@ static int dss_runtime_resume(struct device *dev)
if (r)
return r;
- dss_restore_context();
+ dss_restore_context(dss);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 6374e57ed9da..847c78ade024 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -25,6 +25,11 @@
#include "omapdss.h"
+struct dispc_device;
+struct dss_debugfs_entry;
+struct platform_device;
+struct seq_file;
+
#define MAX_DSS_LCD_MANAGERS 3
#define MAX_NUM_DSI 2
@@ -97,17 +102,6 @@ enum dss_dsi_content_type {
DSS_DSI_CONTENT_GENERIC,
};
-enum dss_writeback_channel {
- DSS_WB_LCD1_MGR = 0,
- DSS_WB_LCD2_MGR = 1,
- DSS_WB_TV_MGR = 2,
- DSS_WB_OVL0 = 3,
- DSS_WB_OVL1 = 4,
- DSS_WB_OVL2 = 5,
- DSS_WB_OVL3 = 6,
- DSS_WB_LCD3_MGR = 7,
-};
-
enum dss_clk_source {
DSS_CLK_SRC_FCK = 0,
@@ -167,10 +161,10 @@ struct dss_pll_ops {
struct dss_pll_hw {
enum dss_pll_type type;
- unsigned n_max;
- unsigned m_min;
- unsigned m_max;
- unsigned mX_max;
+ unsigned int n_max;
+ unsigned int m_min;
+ unsigned int m_max;
+ unsigned int mX_max;
unsigned long fint_min, fint_max;
unsigned long clkdco_min, clkdco_low, clkdco_max;
@@ -191,6 +185,7 @@ struct dss_pll_hw {
struct dss_pll {
const char *name;
enum dss_pll_id id;
+ struct dss_device *dss;
struct clk *clkin;
struct regulator *regulator;
@@ -232,8 +227,44 @@ struct dss_lcd_mgr_config {
int lcden_sig_polarity;
};
-struct seq_file;
-struct platform_device;
+#define DSS_SZ_REGS SZ_512
+
+struct dss_device {
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct regmap *syscon_pll_ctrl;
+ u32 syscon_pll_ctrl_offset;
+
+ struct clk *parent_clk;
+ struct clk *dss_clk;
+ unsigned long dss_clk_rate;
+
+ unsigned long cache_req_pck;
+ unsigned long cache_prate;
+ struct dispc_clock_info cache_dispc_cinfo;
+
+ enum dss_clk_source dsi_clk_source[MAX_NUM_DSI];
+ enum dss_clk_source dispc_clk_source;
+ enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
+
+ bool ctx_valid;
+ u32 ctx[DSS_SZ_REGS / sizeof(u32)];
+
+ const struct dss_features *feat;
+
+ struct {
+ struct dentry *root;
+ struct dss_debugfs_entry *clk;
+ struct dss_debugfs_entry *dss;
+ } debugfs;
+
+ struct dss_pll *plls[4];
+ struct dss_pll *video1_pll;
+ struct dss_pll *video2_pll;
+
+ struct dispc_device *dispc;
+ const struct dispc_ops *dispc_ops;
+};
/* core */
static inline int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
@@ -253,61 +284,81 @@ static inline bool dss_mgr_is_lcd(enum omap_channel id)
/* DSS */
#if defined(CONFIG_OMAP2_DSS_DEBUGFS)
-int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
+struct dss_debugfs_entry *
+dss_debugfs_create_file(struct dss_device *dss, const char *name,
+ int (*show_fn)(struct seq_file *s, void *data),
+ void *data);
+void dss_debugfs_remove_file(struct dss_debugfs_entry *entry);
#else
-static inline int dss_debugfs_create_file(const char *name,
- void (*write)(struct seq_file *))
+static inline struct dss_debugfs_entry *
+dss_debugfs_create_file(struct dss_device *dss, const char *name,
+ int (*show_fn)(struct seq_file *s, void *data),
+ void *data)
+{
+ return NULL;
+}
+
+static inline void dss_debugfs_remove_file(struct dss_debugfs_entry *entry)
{
- return 0;
}
#endif /* CONFIG_OMAP2_DSS_DEBUGFS */
-int dss_runtime_get(void);
-void dss_runtime_put(void);
+struct dss_device *dss_get_device(struct device *dev);
-unsigned long dss_get_dispc_clk_rate(void);
-unsigned long dss_get_max_fck_rate(void);
-enum omap_dss_output_id dss_get_supported_outputs(enum omap_channel channel);
-int dss_dpi_select_source(int port, enum omap_channel channel);
-void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
-enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
+int dss_runtime_get(struct dss_device *dss);
+void dss_runtime_put(struct dss_device *dss);
+
+unsigned long dss_get_dispc_clk_rate(struct dss_device *dss);
+unsigned long dss_get_max_fck_rate(struct dss_device *dss);
+enum omap_dss_output_id dss_get_supported_outputs(struct dss_device *dss,
+ enum omap_channel channel);
+int dss_dpi_select_source(struct dss_device *dss, int port,
+ enum omap_channel channel);
+void dss_select_hdmi_venc_clk_source(struct dss_device *dss,
+ enum dss_hdmi_venc_clk_source_select src);
const char *dss_get_clk_source_name(enum dss_clk_source clk_src);
/* DSS VIDEO PLL */
-struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
- struct regulator *regulator);
+struct dss_pll *dss_video_pll_init(struct dss_device *dss,
+ struct platform_device *pdev, int id,
+ struct regulator *regulator);
void dss_video_pll_uninit(struct dss_pll *pll);
-void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable);
+void dss_ctrl_pll_enable(struct dss_pll *pll, bool enable);
-void dss_sdi_init(int datapairs);
-int dss_sdi_enable(void);
-void dss_sdi_disable(void);
+void dss_sdi_init(struct dss_device *dss, int datapairs);
+int dss_sdi_enable(struct dss_device *dss);
+void dss_sdi_disable(struct dss_device *dss);
-void dss_select_dsi_clk_source(int dsi_module,
- enum dss_clk_source clk_src);
-void dss_select_lcd_clk_source(enum omap_channel channel,
- enum dss_clk_source clk_src);
-enum dss_clk_source dss_get_dispc_clk_source(void);
-enum dss_clk_source dss_get_dsi_clk_source(int dsi_module);
-enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
+void dss_select_dsi_clk_source(struct dss_device *dss, int dsi_module,
+ enum dss_clk_source clk_src);
+void dss_select_lcd_clk_source(struct dss_device *dss,
+ enum omap_channel channel,
+ enum dss_clk_source clk_src);
+enum dss_clk_source dss_get_dispc_clk_source(struct dss_device *dss);
+enum dss_clk_source dss_get_dsi_clk_source(struct dss_device *dss,
+ int dsi_module);
+enum dss_clk_source dss_get_lcd_clk_source(struct dss_device *dss,
+ enum omap_channel channel);
-void dss_set_venc_output(enum omap_dss_venc_type type);
-void dss_set_dac_pwrdn_bgz(bool enable);
+void dss_set_venc_output(struct dss_device *dss, enum omap_dss_venc_type type);
+void dss_set_dac_pwrdn_bgz(struct dss_device *dss, bool enable);
-int dss_set_fck_rate(unsigned long rate);
+int dss_set_fck_rate(struct dss_device *dss, unsigned long rate);
typedef bool (*dss_div_calc_func)(unsigned long fck, void *data);
-bool dss_div_calc(unsigned long pck, unsigned long fck_min,
- dss_div_calc_func func, void *data);
+bool dss_div_calc(struct dss_device *dss, unsigned long pck,
+ unsigned long fck_min, dss_div_calc_func func, void *data);
/* SDI */
#ifdef CONFIG_OMAP2_DSS_SDI
-int sdi_init_port(struct platform_device *pdev, struct device_node *port);
+int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
+ struct device_node *port);
void sdi_uninit_port(struct device_node *port);
#else
-static inline int sdi_init_port(struct platform_device *pdev,
- struct device_node *port)
+static inline int sdi_init_port(struct dss_device *dss,
+ struct platform_device *pdev,
+ struct device_node *port)
{
return 0;
}
@@ -320,9 +371,6 @@ static inline void sdi_uninit_port(struct device_node *port)
#ifdef CONFIG_OMAP2_DSS_DSI
-struct dentry;
-struct file_operations;
-
void dsi_dump_clocks(struct seq_file *s);
void dsi_irq_handler(void);
@@ -331,12 +379,14 @@ void dsi_irq_handler(void);
/* DPI */
#ifdef CONFIG_OMAP2_DSS_DPI
-int dpi_init_port(struct platform_device *pdev, struct device_node *port,
- enum dss_model dss_model);
+int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
+ struct device_node *port, enum dss_model dss_model);
void dpi_uninit_port(struct device_node *port);
#else
-static inline int dpi_init_port(struct platform_device *pdev,
- struct device_node *port, enum dss_model dss_model)
+static inline int dpi_init_port(struct dss_device *dss,
+ struct platform_device *pdev,
+ struct device_node *port,
+ enum dss_model dss_model)
{
return 0;
}
@@ -346,51 +396,49 @@ static inline void dpi_uninit_port(struct device_node *port)
#endif
/* DISPC */
-void dispc_dump_clocks(struct seq_file *s);
+void dispc_dump_clocks(struct dispc_device *dispc, struct seq_file *s);
-int dispc_runtime_get(void);
-void dispc_runtime_put(void);
+int dispc_runtime_get(struct dispc_device *dispc);
+void dispc_runtime_put(struct dispc_device *dispc);
-void dispc_enable_sidle(void);
-void dispc_disable_sidle(void);
+void dispc_enable_sidle(struct dispc_device *dispc);
+void dispc_disable_sidle(struct dispc_device *dispc);
-void dispc_lcd_enable_signal(bool enable);
-void dispc_pck_free_enable(bool enable);
-void dispc_enable_fifomerge(bool enable);
-void dispc_enable_gamma_table(bool enable);
+void dispc_lcd_enable_signal(struct dispc_device *dispc, bool enable);
+void dispc_pck_free_enable(struct dispc_device *dispc, bool enable);
+void dispc_enable_fifomerge(struct dispc_device *dispc, bool enable);
typedef bool (*dispc_div_calc_func)(int lckd, int pckd, unsigned long lck,
unsigned long pck, void *data);
-bool dispc_div_calc(unsigned long dispc,
- unsigned long pck_min, unsigned long pck_max,
- dispc_div_calc_func func, void *data);
-
-bool dispc_mgr_timings_ok(enum omap_channel channel, const struct videomode *vm);
-int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
- struct dispc_clock_info *cinfo);
-
-
-void dispc_ovl_set_fifo_threshold(enum omap_plane_id plane, u32 low,
- u32 high);
-void dispc_ovl_compute_fifo_thresholds(enum omap_plane_id plane,
- u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
- bool manual_update);
-
-void dispc_mgr_set_clock_div(enum omap_channel channel,
- const struct dispc_clock_info *cinfo);
-int dispc_mgr_get_clock_div(enum omap_channel channel,
- struct dispc_clock_info *cinfo);
-void dispc_set_tv_pclk(unsigned long pclk);
-
-u32 dispc_wb_get_framedone_irq(void);
-bool dispc_wb_go_busy(void);
-void dispc_wb_go(void);
-void dispc_wb_set_channel_in(enum dss_writeback_channel channel);
-int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
- bool mem_to_mem, const struct videomode *vm);
+bool dispc_div_calc(struct dispc_device *dispc, unsigned long dispc_freq,
+ unsigned long pck_min, unsigned long pck_max,
+ dispc_div_calc_func func, void *data);
+
+bool dispc_mgr_timings_ok(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm);
+int dispc_calc_clock_rates(struct dispc_device *dispc,
+ unsigned long dispc_fclk_rate,
+ struct dispc_clock_info *cinfo);
+
+
+void dispc_ovl_set_fifo_threshold(struct dispc_device *dispc,
+ enum omap_plane_id plane, u32 low, u32 high);
+void dispc_ovl_compute_fifo_thresholds(struct dispc_device *dispc,
+ enum omap_plane_id plane,
+ u32 *fifo_low, u32 *fifo_high,
+ bool use_fifomerge, bool manual_update);
+
+void dispc_mgr_set_clock_div(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct dispc_clock_info *cinfo);
+int dispc_mgr_get_clock_div(struct dispc_device *dispc,
+ enum omap_channel channel,
+ struct dispc_clock_info *cinfo);
+void dispc_set_tv_pclk(struct dispc_device *dispc, unsigned long pclk);
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-static inline void dss_collect_irq_stats(u32 irqstatus, unsigned *irq_arr)
+static inline void dss_collect_irq_stats(u32 irqstatus, unsigned int *irq_arr)
{
int b;
for (b = 0; b < 32; ++b) {
@@ -406,11 +454,12 @@ typedef bool (*dss_pll_calc_func)(int n, int m, unsigned long fint,
typedef bool (*dss_hsdiv_calc_func)(int m_dispc, unsigned long dispc,
void *data);
-int dss_pll_register(struct dss_pll *pll);
+int dss_pll_register(struct dss_device *dss, struct dss_pll *pll);
void dss_pll_unregister(struct dss_pll *pll);
-struct dss_pll *dss_pll_find(const char *name);
-struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src);
-unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src);
+struct dss_pll *dss_pll_find(struct dss_device *dss, const char *name);
+struct dss_pll *dss_pll_find_by_src(struct dss_device *dss,
+ enum dss_clk_source src);
+unsigned int dss_pll_get_clkout_idx_for_src(enum dss_clk_source src);
int dss_pll_enable(struct dss_pll *pll);
void dss_pll_disable(struct dss_pll *pll);
int dss_pll_set_config(struct dss_pll *pll,
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index c2609c448ddc..3aeb4cabd59f 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -29,6 +29,8 @@
#include "omapdss.h"
#include "dss.h"
+struct dss_device;
+
/* HDMI Wrapper */
#define HDMI_WP_REVISION 0x0
@@ -324,8 +326,8 @@ phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
/* HDMI PLL funcs */
void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s);
-int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
- struct hdmi_wp_data *wp);
+int hdmi_pll_init(struct dss_device *dss, struct platform_device *pdev,
+ struct hdmi_pll_data *pll, struct hdmi_wp_data *wp);
void hdmi_pll_uninit(struct hdmi_pll_data *hpll);
/* HDMI PHY funcs */
@@ -357,6 +359,9 @@ static inline bool hdmi_mode_has_audio(struct hdmi_config *cfg)
struct omap_hdmi {
struct mutex lock;
struct platform_device *pdev;
+ struct dss_device *dss;
+
+ struct dss_debugfs_entry *debugfs;
struct hdmi_wp_data wp;
struct hdmi_pll_data pll;
@@ -384,4 +389,6 @@ struct omap_hdmi {
bool display_enabled;
};
+#define dssdev_to_hdmi(dssdev) container_of(dssdev, struct omap_hdmi, output)
+
#endif
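
The new dssdev_to_hdmi() macro above lets each callback recover the driver's private structure from the embedded struct omap_dss_device, which is what allows the file-scope singleton to be dropped in the hdmi4/hdmi5 changes that follow. A minimal, self-contained sketch of the same container_of pattern, using hypothetical foo_* names rather than the driver's own types:

#include <linux/container_of.h>	/* container_of(); lives in <linux/kernel.h> on older kernels */

struct foo_output {
	const char *name;
};

struct foo_device {
	struct foo_output output;	/* embedded by value, not a pointer */
	int users;
};

/* Given a pointer to the embedded member, recover the enclosing structure. */
#define output_to_foo(out) container_of(out, struct foo_device, output)

static void foo_enable(struct foo_output *out)
{
	struct foo_device *foo = output_to_foo(out);

	foo->users++;
}

The pattern only works because output is embedded by value in struct omap_hdmi; applying it to a separately allocated object would be undefined.
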
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index bf914f2ac99e..97c88861d67a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -45,15 +45,13 @@
#include "dss.h"
#include "hdmi.h"
-static struct omap_hdmi hdmi;
-
-static int hdmi_runtime_get(void)
+static int hdmi_runtime_get(struct omap_hdmi *hdmi)
{
int r;
DSSDBG("hdmi_runtime_get\n");
- r = pm_runtime_get_sync(&hdmi.pdev->dev);
+ r = pm_runtime_get_sync(&hdmi->pdev->dev);
WARN_ON(r < 0);
if (r < 0)
return r;
@@ -61,13 +59,13 @@ static int hdmi_runtime_get(void)
return 0;
}
-static void hdmi_runtime_put(void)
+static void hdmi_runtime_put(struct omap_hdmi *hdmi)
{
int r;
DSSDBG("hdmi_runtime_put\n");
- r = pm_runtime_put_sync(&hdmi.pdev->dev);
+ r = pm_runtime_put_sync(&hdmi->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
@@ -110,14 +108,14 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int hdmi_init_regulator(void)
+static int hdmi_init_regulator(struct omap_hdmi *hdmi)
{
struct regulator *reg;
- if (hdmi.vdda_reg != NULL)
+ if (hdmi->vdda_reg != NULL)
return 0;
- reg = devm_regulator_get(&hdmi.pdev->dev, "vdda");
+ reg = devm_regulator_get(&hdmi->pdev->dev, "vdda");
if (IS_ERR(reg)) {
if (PTR_ERR(reg) != -EPROBE_DEFER)
@@ -125,64 +123,63 @@ static int hdmi_init_regulator(void)
return PTR_ERR(reg);
}
- hdmi.vdda_reg = reg;
+ hdmi->vdda_reg = reg;
return 0;
}
-static int hdmi_power_on_core(struct omap_dss_device *dssdev)
+static int hdmi_power_on_core(struct omap_hdmi *hdmi)
{
int r;
- if (hdmi.core.core_pwr_cnt++)
+ if (hdmi->core.core_pwr_cnt++)
return 0;
- r = regulator_enable(hdmi.vdda_reg);
+ r = regulator_enable(hdmi->vdda_reg);
if (r)
goto err_reg_enable;
- r = hdmi_runtime_get();
+ r = hdmi_runtime_get(hdmi);
if (r)
goto err_runtime_get;
- hdmi4_core_powerdown_disable(&hdmi.core);
+ hdmi4_core_powerdown_disable(&hdmi->core);
/* Make selection of HDMI in DSS */
- dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
+ dss_select_hdmi_venc_clk_source(hdmi->dss, DSS_HDMI_M_PCLK);
- hdmi.core_enabled = true;
+ hdmi->core_enabled = true;
return 0;
err_runtime_get:
- regulator_disable(hdmi.vdda_reg);
+ regulator_disable(hdmi->vdda_reg);
err_reg_enable:
- hdmi.core.core_pwr_cnt--;
+ hdmi->core.core_pwr_cnt--;
return r;
}
-static void hdmi_power_off_core(struct omap_dss_device *dssdev)
+static void hdmi_power_off_core(struct omap_hdmi *hdmi)
{
- if (--hdmi.core.core_pwr_cnt)
+ if (--hdmi->core.core_pwr_cnt)
return;
- hdmi.core_enabled = false;
+ hdmi->core_enabled = false;
- hdmi_runtime_put();
- regulator_disable(hdmi.vdda_reg);
+ hdmi_runtime_put(hdmi);
+ regulator_disable(hdmi->vdda_reg);
}
-static int hdmi_power_on_full(struct omap_dss_device *dssdev)
+static int hdmi_power_on_full(struct omap_hdmi *hdmi)
{
int r;
struct videomode *vm;
- enum omap_channel channel = dssdev->dispc_channel;
- struct hdmi_wp_data *wp = &hdmi.wp;
+ struct hdmi_wp_data *wp = &hdmi->wp;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
- unsigned pc;
+ unsigned int pc;
- r = hdmi_power_on_core(dssdev);
+ r = hdmi_power_on_core(hdmi);
if (r)
return r;
@@ -190,7 +187,7 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
hdmi_wp_clear_irqenable(wp, ~HDMI_IRQ_CORE);
hdmi_wp_set_irqstatus(wp, ~HDMI_IRQ_CORE);
- vm = &hdmi.cfg.vm;
+ vm = &hdmi->cfg.vm;
DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
vm->vactive);
@@ -202,22 +199,22 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
/* DSS_HDMI_TCLK is bitclk / 10 */
pc *= 10;
- dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
+ dss_pll_calc_b(&hdmi->pll.pll, clk_get_rate(hdmi->pll.pll.clkin),
pc, &hdmi_cinfo);
- r = dss_pll_enable(&hdmi.pll.pll);
+ r = dss_pll_enable(&hdmi->pll.pll);
if (r) {
DSSERR("Failed to enable PLL\n");
goto err_pll_enable;
}
- r = dss_pll_set_config(&hdmi.pll.pll, &hdmi_cinfo);
+ r = dss_pll_set_config(&hdmi->pll.pll, &hdmi_cinfo);
if (r) {
DSSERR("Failed to configure PLL\n");
goto err_pll_cfg;
}
- r = hdmi_phy_configure(&hdmi.phy, hdmi_cinfo.clkdco,
+ r = hdmi_phy_configure(&hdmi->phy, hdmi_cinfo.clkdco,
hdmi_cinfo.clkout[0]);
if (r) {
DSSDBG("Failed to configure PHY\n");
@@ -228,16 +225,16 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
if (r)
goto err_phy_pwr;
- hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
+ hdmi4_configure(&hdmi->core, &hdmi->wp, &hdmi->cfg);
/* tv size */
- dss_mgr_set_timings(channel, vm);
+ dss_mgr_set_timings(&hdmi->output, vm);
- r = dss_mgr_enable(channel);
+ r = dss_mgr_enable(&hdmi->output);
if (r)
goto err_mgr_enable;
- r = hdmi_wp_video_start(&hdmi.wp);
+ r = hdmi_wp_video_start(&hdmi->wp);
if (r)
goto err_vid_enable;
@@ -247,39 +244,39 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
return 0;
err_vid_enable:
- dss_mgr_disable(channel);
+ dss_mgr_disable(&hdmi->output);
err_mgr_enable:
- hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
+ hdmi_wp_set_phy_pwr(&hdmi->wp, HDMI_PHYPWRCMD_OFF);
err_phy_pwr:
err_phy_cfg:
err_pll_cfg:
- dss_pll_disable(&hdmi.pll.pll);
+ dss_pll_disable(&hdmi->pll.pll);
err_pll_enable:
- hdmi_power_off_core(dssdev);
+ hdmi_power_off_core(hdmi);
return -EIO;
}
-static void hdmi_power_off_full(struct omap_dss_device *dssdev)
+static void hdmi_power_off_full(struct omap_hdmi *hdmi)
{
- enum omap_channel channel = dssdev->dispc_channel;
-
- hdmi_wp_clear_irqenable(&hdmi.wp, ~HDMI_IRQ_CORE);
+ hdmi_wp_clear_irqenable(&hdmi->wp, ~HDMI_IRQ_CORE);
- hdmi_wp_video_stop(&hdmi.wp);
+ hdmi_wp_video_stop(&hdmi->wp);
- dss_mgr_disable(channel);
+ dss_mgr_disable(&hdmi->output);
- hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
+ hdmi_wp_set_phy_pwr(&hdmi->wp, HDMI_PHYPWRCMD_OFF);
- dss_pll_disable(&hdmi.pll.pll);
+ dss_pll_disable(&hdmi->pll.pll);
- hdmi_power_off_core(dssdev);
+ hdmi_power_off_core(hdmi);
}
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- if (!dispc_mgr_timings_ok(dssdev->dispc_channel, vm))
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ if (!dispc_mgr_timings_ok(hdmi->dss->dispc, dssdev->dispc_channel, vm))
return -EINVAL;
return 0;
@@ -288,52 +285,59 @@ static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- mutex_lock(&hdmi.lock);
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ mutex_lock(&hdmi->lock);
- hdmi.cfg.vm = *vm;
+ hdmi->cfg.vm = *vm;
- dispc_set_tv_pclk(vm->pixelclock);
+ dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock);
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
}
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- *vm = hdmi.cfg.vm;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ *vm = hdmi->cfg.vm;
}
-static void hdmi_dump_regs(struct seq_file *s)
+static int hdmi_dump_regs(struct seq_file *s, void *p)
{
- mutex_lock(&hdmi.lock);
+ struct omap_hdmi *hdmi = s->private;
- if (hdmi_runtime_get()) {
- mutex_unlock(&hdmi.lock);
- return;
+ mutex_lock(&hdmi->lock);
+
+ if (hdmi_runtime_get(hdmi)) {
+ mutex_unlock(&hdmi->lock);
+ return 0;
}
- hdmi_wp_dump(&hdmi.wp, s);
- hdmi_pll_dump(&hdmi.pll, s);
- hdmi_phy_dump(&hdmi.phy, s);
- hdmi4_core_dump(&hdmi.core, s);
+ hdmi_wp_dump(&hdmi->wp, s);
+ hdmi_pll_dump(&hdmi->pll, s);
+ hdmi_phy_dump(&hdmi->phy, s);
+ hdmi4_core_dump(&hdmi->core, s);
- hdmi_runtime_put();
- mutex_unlock(&hdmi.lock);
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
+ return 0;
}
-static int read_edid(u8 *buf, int len)
+static int read_edid(struct omap_hdmi *hdmi, u8 *buf, int len)
{
int r;
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- r = hdmi_runtime_get();
+ r = hdmi_runtime_get(hdmi);
BUG_ON(r);
- r = hdmi4_read_edid(&hdmi.core, buf, len);
+ r = hdmi4_read_edid(&hdmi->core, buf, len);
- hdmi_runtime_put();
- mutex_unlock(&hdmi.lock);
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
return r;
}
@@ -352,112 +356,117 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
static int hdmi_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_device *out = &hdmi.output;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
unsigned long flags;
int r = 0;
DSSDBG("ENTER hdmi_display_enable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- if (!out->dispc_channel_connected) {
+ if (!dssdev->dispc_channel_connected) {
DSSERR("failed to enable display: no output/manager\n");
r = -ENODEV;
goto err0;
}
- r = hdmi_power_on_full(dssdev);
+ r = hdmi_power_on_full(hdmi);
if (r) {
DSSERR("failed to power on device\n");
goto err0;
}
- if (hdmi.audio_configured) {
- r = hdmi4_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config,
- hdmi.cfg.vm.pixelclock);
+ if (hdmi->audio_configured) {
+ r = hdmi4_audio_config(&hdmi->core, &hdmi->wp,
+ &hdmi->audio_config,
+ hdmi->cfg.vm.pixelclock);
if (r) {
DSSERR("Error restoring audio configuration: %d", r);
- hdmi.audio_abort_cb(&hdmi.pdev->dev);
- hdmi.audio_configured = false;
+ hdmi->audio_abort_cb(&hdmi->pdev->dev);
+ hdmi->audio_configured = false;
}
}
- spin_lock_irqsave(&hdmi.audio_playing_lock, flags);
- if (hdmi.audio_configured && hdmi.audio_playing)
- hdmi_start_audio_stream(&hdmi);
- hdmi.display_enabled = true;
- spin_unlock_irqrestore(&hdmi.audio_playing_lock, flags);
+ spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
+ if (hdmi->audio_configured && hdmi->audio_playing)
+ hdmi_start_audio_stream(hdmi);
+ hdmi->display_enabled = true;
+ spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return 0;
err0:
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return r;
}
static void hdmi_display_disable(struct omap_dss_device *dssdev)
{
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
unsigned long flags;
DSSDBG("Enter hdmi_display_disable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- spin_lock_irqsave(&hdmi.audio_playing_lock, flags);
- hdmi_stop_audio_stream(&hdmi);
- hdmi.display_enabled = false;
- spin_unlock_irqrestore(&hdmi.audio_playing_lock, flags);
+ spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
+ hdmi_stop_audio_stream(hdmi);
+ hdmi->display_enabled = false;
+ spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
- hdmi_power_off_full(dssdev);
+ hdmi_power_off_full(hdmi);
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
}
-int hdmi4_core_enable(struct omap_dss_device *dssdev)
+int hdmi4_core_enable(struct hdmi_core_data *core)
{
+ struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
int r = 0;
DSSDBG("ENTER omapdss_hdmi4_core_enable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- r = hdmi_power_on_core(dssdev);
+ r = hdmi_power_on_core(hdmi);
if (r) {
DSSERR("failed to power on device\n");
goto err0;
}
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return 0;
err0:
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return r;
}
-void hdmi4_core_disable(struct omap_dss_device *dssdev)
+void hdmi4_core_disable(struct hdmi_core_data *core)
{
+ struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
+
DSSDBG("Enter omapdss_hdmi4_core_disable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- hdmi_power_off_core(dssdev);
+ hdmi_power_off_core(hdmi);
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
}
static int hdmi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
int r;
- r = hdmi_init_regulator();
+ r = hdmi_init_regulator(hdmi);
if (r)
return r;
- r = dss_mgr_connect(channel, dssdev);
+ r = dss_mgr_connect(&hdmi->output, dssdev);
if (r)
return r;
@@ -465,7 +474,7 @@ static int hdmi_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dst->name);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&hdmi->output, dssdev);
return r;
}
@@ -475,7 +484,7 @@ static int hdmi_connect(struct omap_dss_device *dssdev,
static void hdmi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
WARN_ON(dst != dssdev->dst);
@@ -484,51 +493,58 @@ static void hdmi_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&hdmi->output, dssdev);
}
static int hdmi_read_edid(struct omap_dss_device *dssdev,
u8 *edid, int len)
{
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
bool need_enable;
int r;
- need_enable = hdmi.core_enabled == false;
+ need_enable = hdmi->core_enabled == false;
if (need_enable) {
- r = hdmi4_core_enable(dssdev);
+ r = hdmi4_core_enable(&hdmi->core);
if (r)
return r;
}
- r = read_edid(edid, len);
+ r = read_edid(hdmi, edid, len);
if (r >= 256)
- hdmi4_cec_set_phys_addr(&hdmi.core,
+ hdmi4_cec_set_phys_addr(&hdmi->core,
cec_get_edid_phys_addr(edid, r, NULL));
else
- hdmi4_cec_set_phys_addr(&hdmi.core, CEC_PHYS_ADDR_INVALID);
+ hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
if (need_enable)
- hdmi4_core_disable(dssdev);
+ hdmi4_core_disable(&hdmi->core);
return r;
}
static void hdmi_lost_hotplug(struct omap_dss_device *dssdev)
{
- hdmi4_cec_set_phys_addr(&hdmi.core, CEC_PHYS_ADDR_INVALID);
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
}
static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi)
{
- hdmi.cfg.infoframe = *avi;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ hdmi->cfg.infoframe = *avi;
return 0;
}
static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
bool hdmi_mode)
{
- hdmi.cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ hdmi->cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
return 0;
}
@@ -549,11 +565,11 @@ static const struct omapdss_hdmi_ops hdmi_ops = {
.set_hdmi_mode = hdmi_set_hdmi_mode,
};
-static void hdmi_init_output(struct platform_device *pdev)
+static void hdmi_init_output(struct omap_hdmi *hdmi)
{
- struct omap_dss_device *out = &hdmi.output;
+ struct omap_dss_device *out = &hdmi->output;
- out->dev = &pdev->dev;
+ out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
out->output_type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
@@ -564,15 +580,16 @@ static void hdmi_init_output(struct platform_device *pdev)
omapdss_register_output(out);
}
-static void hdmi_uninit_output(struct platform_device *pdev)
+static void hdmi_uninit_output(struct omap_hdmi *hdmi)
{
- struct omap_dss_device *out = &hdmi.output;
+ struct omap_dss_device *out = &hdmi->output;
omapdss_unregister_output(out);
}
-static int hdmi_probe_of(struct platform_device *pdev)
+static int hdmi_probe_of(struct omap_hdmi *hdmi)
{
+ struct platform_device *pdev = hdmi->pdev;
struct device_node *node = pdev->dev.of_node;
struct device_node *ep;
int r;
@@ -581,7 +598,7 @@ static int hdmi_probe_of(struct platform_device *pdev)
if (!ep)
return 0;
- r = hdmi_parse_lanes_of(pdev, ep, &hdmi.phy);
+ r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy);
if (r)
goto err;
@@ -598,21 +615,16 @@ static int hdmi_audio_startup(struct device *dev,
void (*abort_cb)(struct device *dev))
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
- int ret = 0;
mutex_lock(&hd->lock);
- if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
- ret = -EPERM;
- goto out;
- }
+ WARN_ON(hd->audio_abort_cb != NULL);
hd->audio_abort_cb = abort_cb;
-out:
mutex_unlock(&hd->lock);
- return ret;
+ return 0;
}
static int hdmi_audio_shutdown(struct device *dev)
@@ -633,12 +645,14 @@ static int hdmi_audio_start(struct device *dev)
struct omap_hdmi *hd = dev_get_drvdata(dev);
unsigned long flags;
- WARN_ON(!hdmi_mode_has_audio(&hd->cfg));
-
spin_lock_irqsave(&hd->audio_playing_lock, flags);
- if (hd->display_enabled)
+ if (hd->display_enabled) {
+ if (!hdmi_mode_has_audio(&hd->cfg))
+ DSSERR("%s: Video mode does not support audio\n",
+ __func__);
hdmi_start_audio_stream(hd);
+ }
hd->audio_playing = true;
spin_unlock_irqrestore(&hd->audio_playing_lock, flags);
@@ -669,17 +683,15 @@ static int hdmi_audio_config(struct device *dev,
mutex_lock(&hd->lock);
- if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
- ret = -EPERM;
- goto out;
+ if (hd->display_enabled) {
+ ret = hdmi4_audio_config(&hd->core, &hd->wp, dss_audio,
+ hd->cfg.vm.pixelclock);
+ if (ret)
+ goto out;
}
- ret = hdmi4_audio_config(&hd->core, &hd->wp, dss_audio,
- hd->cfg.vm.pixelclock);
- if (!ret) {
- hd->audio_configured = true;
- hd->audio_config = *dss_audio;
- }
+ hd->audio_configured = true;
+ hd->audio_config = *dss_audio;
out:
mutex_unlock(&hd->lock);
@@ -694,21 +706,21 @@ static const struct omap_hdmi_audio_ops hdmi_audio_ops = {
.audio_config = hdmi_audio_config,
};
-static int hdmi_audio_register(struct device *dev)
+static int hdmi_audio_register(struct omap_hdmi *hdmi)
{
struct omap_hdmi_audio_pdata pdata = {
- .dev = dev,
+ .dev = &hdmi->pdev->dev,
.version = 4,
- .audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp),
+ .audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi->wp),
.ops = &hdmi_audio_ops,
};
- hdmi.audio_pdev = platform_device_register_data(
- dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO,
+ hdmi->audio_pdev = platform_device_register_data(
+ &hdmi->pdev->dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO,
&pdata, sizeof(pdata));
- if (IS_ERR(hdmi.audio_pdev))
- return PTR_ERR(hdmi.audio_pdev);
+ if (IS_ERR(hdmi->audio_pdev))
+ return PTR_ERR(hdmi->audio_pdev);
return 0;
}
@@ -717,88 +729,103 @@ static int hdmi_audio_register(struct device *dev)
static int hdmi4_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
+ struct dss_device *dss = dss_get_device(master);
+ struct omap_hdmi *hdmi;
int r;
int irq;
- hdmi.pdev = pdev;
- dev_set_drvdata(&pdev->dev, &hdmi);
+ hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->pdev = pdev;
+ hdmi->dss = dss;
+ dev_set_drvdata(&pdev->dev, hdmi);
- mutex_init(&hdmi.lock);
- spin_lock_init(&hdmi.audio_playing_lock);
+ mutex_init(&hdmi->lock);
+ spin_lock_init(&hdmi->audio_playing_lock);
- r = hdmi_probe_of(pdev);
+ r = hdmi_probe_of(hdmi);
if (r)
- return r;
+ goto err_free;
- r = hdmi_wp_init(pdev, &hdmi.wp, 4);
+ r = hdmi_wp_init(pdev, &hdmi->wp, 4);
if (r)
- return r;
+ goto err_free;
- r = hdmi_pll_init(pdev, &hdmi.pll, &hdmi.wp);
+ r = hdmi_pll_init(dss, pdev, &hdmi->pll, &hdmi->wp);
if (r)
- return r;
+ goto err_free;
- r = hdmi_phy_init(pdev, &hdmi.phy, 4);
+ r = hdmi_phy_init(pdev, &hdmi->phy, 4);
if (r)
- goto err;
+ goto err_pll;
- r = hdmi4_core_init(pdev, &hdmi.core);
+ r = hdmi4_core_init(pdev, &hdmi->core);
if (r)
- goto err;
+ goto err_pll;
- r = hdmi4_cec_init(pdev, &hdmi.core, &hdmi.wp);
+ r = hdmi4_cec_init(pdev, &hdmi->core, &hdmi->wp);
if (r)
- goto err;
+ goto err_pll;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto err;
+ goto err_pll;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
NULL, hdmi_irq_handler,
- IRQF_ONESHOT, "OMAP HDMI", &hdmi);
+ IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err;
+ goto err_pll;
}
pm_runtime_enable(&pdev->dev);
- hdmi_init_output(pdev);
+ hdmi_init_output(hdmi);
- r = hdmi_audio_register(&pdev->dev);
+ r = hdmi_audio_register(hdmi);
if (r) {
DSSERR("Registering HDMI audio failed\n");
- hdmi_uninit_output(pdev);
+ hdmi_uninit_output(hdmi);
pm_runtime_disable(&pdev->dev);
return r;
}
- dss_debugfs_create_file("hdmi", hdmi_dump_regs);
+ hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
+ hdmi);
return 0;
-err:
- hdmi_pll_uninit(&hdmi.pll);
+
+err_pll:
+ hdmi_pll_uninit(&hdmi->pll);
+err_free:
+ kfree(hdmi);
return r;
}
static void hdmi4_unbind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dss_debugfs_remove_file(hdmi->debugfs);
- if (hdmi.audio_pdev)
- platform_device_unregister(hdmi.audio_pdev);
+ if (hdmi->audio_pdev)
+ platform_device_unregister(hdmi->audio_pdev);
- hdmi_uninit_output(pdev);
+ hdmi_uninit_output(hdmi);
- hdmi4_cec_uninit(&hdmi.core);
+ hdmi4_cec_uninit(&hdmi->core);
- hdmi_pll_uninit(&hdmi.pll);
+ hdmi_pll_uninit(&hdmi->pll);
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
+
+ kfree(hdmi);
}
static const struct component_ops hdmi4_component_ops = {
@@ -819,16 +846,19 @@ static int hdmi4_remove(struct platform_device *pdev)
static int hdmi_runtime_suspend(struct device *dev)
{
- dispc_runtime_put();
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dispc_runtime_put(hdmi->dss->dispc);
return 0;
}
static int hdmi_runtime_resume(struct device *dev)
{
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
int r;
- r = dispc_runtime_get();
+ r = dispc_runtime_get(hdmi->dss->dispc);
if (r < 0)
return r;
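
hdmi4_bind()/hdmi4_unbind() above replace the static struct omap_hdmi singleton with a kzalloc()ed instance stored in the device's drvdata, so later callbacks (runtime PM, audio, debugfs) retrieve it with dev_get_drvdata(). A stripped-down sketch of that bind/unbind lifecycle, with illustrative foo_* names and no error paths beyond the allocation itself:

#include <linux/component.h>
#include <linux/device.h>
#include <linux/slab.h>

struct foo_device {
	struct device *dev;
	/* per-instance state lives here instead of in file-scope globals */
};

static int foo_bind(struct device *dev, struct device *master, void *data)
{
	struct foo_device *foo;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	foo->dev = dev;
	dev_set_drvdata(dev, foo);	/* later callbacks use dev_get_drvdata(dev) */
	return 0;
}

static void foo_unbind(struct device *dev, struct device *master, void *data)
{
	struct foo_device *foo = dev_get_drvdata(dev);

	kfree(foo);			/* everything allocated in bind is released here */
}

static const struct component_ops foo_component_ops = {
	.bind	= foo_bind,
	.unbind	= foo_unbind,
};

Because the driver keeps devm_* for the IRQ and regulator, only the top-level structure needs an explicit kfree() in the new err_free and unbind paths.
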
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
index 23db74ae1826..340383150fb9 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
@@ -175,10 +175,10 @@ static int hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
REG_FLD_MOD(core->base, HDMI_CORE_SYS_INTR_UNMASK4, 0, 3, 3);
hdmi_wp_clear_irqenable(core->wp, HDMI_IRQ_CORE);
hdmi_wp_set_irqstatus(core->wp, HDMI_IRQ_CORE);
- hdmi4_core_disable(NULL);
+ hdmi4_core_disable(core);
return 0;
}
- err = hdmi4_core_enable(NULL);
+ err = hdmi4_core_enable(core);
if (err)
return err;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
index b6ab579e44d2..337a317c1a27 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
@@ -266,8 +266,8 @@ void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s);
int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core);
-int hdmi4_core_enable(struct omap_dss_device *dssdev);
-void hdmi4_core_disable(struct omap_dss_device *dssdev);
+int hdmi4_core_enable(struct hdmi_core_data *core);
+void hdmi4_core_disable(struct hdmi_core_data *core);
void hdmi4_core_powerdown_disable(struct hdmi_core_data *core);
int hdmi4_audio_start(struct hdmi_core_data *core, struct hdmi_wp_data *wp);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 689cda41858b..d28da9ac3e90 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -46,15 +46,13 @@
#include "hdmi5_core.h"
#include "dss.h"
-static struct omap_hdmi hdmi;
-
-static int hdmi_runtime_get(void)
+static int hdmi_runtime_get(struct omap_hdmi *hdmi)
{
int r;
DSSDBG("hdmi_runtime_get\n");
- r = pm_runtime_get_sync(&hdmi.pdev->dev);
+ r = pm_runtime_get_sync(&hdmi->pdev->dev);
WARN_ON(r < 0);
if (r < 0)
return r;
@@ -62,19 +60,20 @@ static int hdmi_runtime_get(void)
return 0;
}
-static void hdmi_runtime_put(void)
+static void hdmi_runtime_put(struct omap_hdmi *hdmi)
{
int r;
DSSDBG("hdmi_runtime_put\n");
- r = pm_runtime_put_sync(&hdmi.pdev->dev);
+ r = pm_runtime_put_sync(&hdmi->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
static irqreturn_t hdmi_irq_handler(int irq, void *data)
{
- struct hdmi_wp_data *wp = data;
+ struct omap_hdmi *hdmi = data;
+ struct hdmi_wp_data *wp = &hdmi->wp;
u32 irqstatus;
irqstatus = hdmi_wp_get_irqstatus(wp);
@@ -97,17 +96,17 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
* setting the PHY to LDOON. To ignore those, we force the RXDET
* line to 0 until the PHY power state has been changed.
*/
- v = hdmi_read_reg(hdmi.phy.base, HDMI_TXPHY_PAD_CFG_CTRL);
+ v = hdmi_read_reg(hdmi->phy.base, HDMI_TXPHY_PAD_CFG_CTRL);
v = FLD_MOD(v, 1, 15, 15); /* FORCE_RXDET_HIGH */
v = FLD_MOD(v, 0, 14, 7); /* RXDET_LINE */
- hdmi_write_reg(hdmi.phy.base, HDMI_TXPHY_PAD_CFG_CTRL, v);
+ hdmi_write_reg(hdmi->phy.base, HDMI_TXPHY_PAD_CFG_CTRL, v);
hdmi_wp_set_irqstatus(wp, HDMI_IRQ_LINK_CONNECT |
HDMI_IRQ_LINK_DISCONNECT);
hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_LDOON);
- REG_FLD_MOD(hdmi.phy.base, HDMI_TXPHY_PAD_CFG_CTRL, 0, 15, 15);
+ REG_FLD_MOD(hdmi->phy.base, HDMI_TXPHY_PAD_CFG_CTRL, 0, 15, 15);
} else if (irqstatus & HDMI_IRQ_LINK_CONNECT) {
hdmi_wp_set_phy_pwr(wp, HDMI_PHYPWRCMD_TXON);
@@ -118,70 +117,69 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int hdmi_init_regulator(void)
+static int hdmi_init_regulator(struct omap_hdmi *hdmi)
{
struct regulator *reg;
- if (hdmi.vdda_reg != NULL)
+ if (hdmi->vdda_reg != NULL)
return 0;
- reg = devm_regulator_get(&hdmi.pdev->dev, "vdda");
+ reg = devm_regulator_get(&hdmi->pdev->dev, "vdda");
if (IS_ERR(reg)) {
DSSERR("can't get VDDA regulator\n");
return PTR_ERR(reg);
}
- hdmi.vdda_reg = reg;
+ hdmi->vdda_reg = reg;
return 0;
}
-static int hdmi_power_on_core(struct omap_dss_device *dssdev)
+static int hdmi_power_on_core(struct omap_hdmi *hdmi)
{
int r;
- r = regulator_enable(hdmi.vdda_reg);
+ r = regulator_enable(hdmi->vdda_reg);
if (r)
return r;
- r = hdmi_runtime_get();
+ r = hdmi_runtime_get(hdmi);
if (r)
goto err_runtime_get;
/* Make selection of HDMI in DSS */
- dss_select_hdmi_venc_clk_source(DSS_HDMI_M_PCLK);
+ dss_select_hdmi_venc_clk_source(hdmi->dss, DSS_HDMI_M_PCLK);
- hdmi.core_enabled = true;
+ hdmi->core_enabled = true;
return 0;
err_runtime_get:
- regulator_disable(hdmi.vdda_reg);
+ regulator_disable(hdmi->vdda_reg);
return r;
}
-static void hdmi_power_off_core(struct omap_dss_device *dssdev)
+static void hdmi_power_off_core(struct omap_hdmi *hdmi)
{
- hdmi.core_enabled = false;
+ hdmi->core_enabled = false;
- hdmi_runtime_put();
- regulator_disable(hdmi.vdda_reg);
+ hdmi_runtime_put(hdmi);
+ regulator_disable(hdmi->vdda_reg);
}
-static int hdmi_power_on_full(struct omap_dss_device *dssdev)
+static int hdmi_power_on_full(struct omap_hdmi *hdmi)
{
int r;
struct videomode *vm;
- enum omap_channel channel = dssdev->dispc_channel;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
- unsigned pc;
+ unsigned int pc;
- r = hdmi_power_on_core(dssdev);
+ r = hdmi_power_on_core(hdmi);
if (r)
return r;
- vm = &hdmi.cfg.vm;
+ vm = &hdmi->cfg.vm;
DSSDBG("hdmi_power_on hactive= %d vactive = %d\n", vm->hactive,
vm->vactive);
@@ -193,89 +191,89 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
/* DSS_HDMI_TCLK is bitclk / 10 */
pc *= 10;
- dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
+ dss_pll_calc_b(&hdmi->pll.pll, clk_get_rate(hdmi->pll.pll.clkin),
pc, &hdmi_cinfo);
/* disable and clear irqs */
- hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
- hdmi_wp_set_irqstatus(&hdmi.wp,
- hdmi_wp_get_irqstatus(&hdmi.wp));
+ hdmi_wp_clear_irqenable(&hdmi->wp, 0xffffffff);
+ hdmi_wp_set_irqstatus(&hdmi->wp,
+ hdmi_wp_get_irqstatus(&hdmi->wp));
- r = dss_pll_enable(&hdmi.pll.pll);
+ r = dss_pll_enable(&hdmi->pll.pll);
if (r) {
DSSERR("Failed to enable PLL\n");
goto err_pll_enable;
}
- r = dss_pll_set_config(&hdmi.pll.pll, &hdmi_cinfo);
+ r = dss_pll_set_config(&hdmi->pll.pll, &hdmi_cinfo);
if (r) {
DSSERR("Failed to configure PLL\n");
goto err_pll_cfg;
}
- r = hdmi_phy_configure(&hdmi.phy, hdmi_cinfo.clkdco,
+ r = hdmi_phy_configure(&hdmi->phy, hdmi_cinfo.clkdco,
hdmi_cinfo.clkout[0]);
if (r) {
DSSDBG("Failed to start PHY\n");
goto err_phy_cfg;
}
- r = hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_LDOON);
+ r = hdmi_wp_set_phy_pwr(&hdmi->wp, HDMI_PHYPWRCMD_LDOON);
if (r)
goto err_phy_pwr;
- hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
+ hdmi5_configure(&hdmi->core, &hdmi->wp, &hdmi->cfg);
/* tv size */
- dss_mgr_set_timings(channel, vm);
+ dss_mgr_set_timings(&hdmi->output, vm);
- r = dss_mgr_enable(channel);
+ r = dss_mgr_enable(&hdmi->output);
if (r)
goto err_mgr_enable;
- r = hdmi_wp_video_start(&hdmi.wp);
+ r = hdmi_wp_video_start(&hdmi->wp);
if (r)
goto err_vid_enable;
- hdmi_wp_set_irqenable(&hdmi.wp,
+ hdmi_wp_set_irqenable(&hdmi->wp,
HDMI_IRQ_LINK_CONNECT | HDMI_IRQ_LINK_DISCONNECT);
return 0;
err_vid_enable:
- dss_mgr_disable(channel);
+ dss_mgr_disable(&hdmi->output);
err_mgr_enable:
- hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
+ hdmi_wp_set_phy_pwr(&hdmi->wp, HDMI_PHYPWRCMD_OFF);
err_phy_pwr:
err_phy_cfg:
err_pll_cfg:
- dss_pll_disable(&hdmi.pll.pll);
+ dss_pll_disable(&hdmi->pll.pll);
err_pll_enable:
- hdmi_power_off_core(dssdev);
+ hdmi_power_off_core(hdmi);
return -EIO;
}
-static void hdmi_power_off_full(struct omap_dss_device *dssdev)
+static void hdmi_power_off_full(struct omap_hdmi *hdmi)
{
- enum omap_channel channel = dssdev->dispc_channel;
-
- hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
+ hdmi_wp_clear_irqenable(&hdmi->wp, 0xffffffff);
- hdmi_wp_video_stop(&hdmi.wp);
+ hdmi_wp_video_stop(&hdmi->wp);
- dss_mgr_disable(channel);
+ dss_mgr_disable(&hdmi->output);
- hdmi_wp_set_phy_pwr(&hdmi.wp, HDMI_PHYPWRCMD_OFF);
+ hdmi_wp_set_phy_pwr(&hdmi->wp, HDMI_PHYPWRCMD_OFF);
- dss_pll_disable(&hdmi.pll.pll);
+ dss_pll_disable(&hdmi->pll.pll);
- hdmi_power_off_core(dssdev);
+ hdmi_power_off_core(hdmi);
}
static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- if (!dispc_mgr_timings_ok(dssdev->dispc_channel, vm))
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ if (!dispc_mgr_timings_ok(hdmi->dss->dispc, dssdev->dispc_channel, vm))
return -EINVAL;
return 0;
@@ -284,66 +282,73 @@ static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- mutex_lock(&hdmi.lock);
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
- hdmi.cfg.vm = *vm;
+ mutex_lock(&hdmi->lock);
- dispc_set_tv_pclk(vm->pixelclock);
+ hdmi->cfg.vm = *vm;
- mutex_unlock(&hdmi.lock);
+ dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock);
+
+ mutex_unlock(&hdmi->lock);
}
static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- *vm = hdmi.cfg.vm;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ *vm = hdmi->cfg.vm;
}
-static void hdmi_dump_regs(struct seq_file *s)
+static int hdmi_dump_regs(struct seq_file *s, void *p)
{
- mutex_lock(&hdmi.lock);
+ struct omap_hdmi *hdmi = s->private;
- if (hdmi_runtime_get()) {
- mutex_unlock(&hdmi.lock);
- return;
+ mutex_lock(&hdmi->lock);
+
+ if (hdmi_runtime_get(hdmi)) {
+ mutex_unlock(&hdmi->lock);
+ return 0;
}
- hdmi_wp_dump(&hdmi.wp, s);
- hdmi_pll_dump(&hdmi.pll, s);
- hdmi_phy_dump(&hdmi.phy, s);
- hdmi5_core_dump(&hdmi.core, s);
+ hdmi_wp_dump(&hdmi->wp, s);
+ hdmi_pll_dump(&hdmi->pll, s);
+ hdmi_phy_dump(&hdmi->phy, s);
+ hdmi5_core_dump(&hdmi->core, s);
- hdmi_runtime_put();
- mutex_unlock(&hdmi.lock);
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
+ return 0;
}
-static int read_edid(u8 *buf, int len)
+static int read_edid(struct omap_hdmi *hdmi, u8 *buf, int len)
{
int r;
int idlemode;
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- r = hdmi_runtime_get();
+ r = hdmi_runtime_get(hdmi);
BUG_ON(r);
- idlemode = REG_GET(hdmi.wp.base, HDMI_WP_SYSCONFIG, 3, 2);
+ idlemode = REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
/* No-idle mode */
- REG_FLD_MOD(hdmi.wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
+ REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
- r = hdmi5_read_edid(&hdmi.core, buf, len);
+ r = hdmi5_read_edid(&hdmi->core, buf, len);
- REG_FLD_MOD(hdmi.wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2);
+ REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2);
- hdmi_runtime_put();
- mutex_unlock(&hdmi.lock);
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
return r;
}
static void hdmi_start_audio_stream(struct omap_hdmi *hd)
{
- REG_FLD_MOD(hdmi.wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
+ REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
hdmi_wp_audio_enable(&hd->wp, true);
hdmi_wp_audio_core_req_enable(&hd->wp, true);
}
@@ -357,112 +362,114 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
static int hdmi_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_device *out = &hdmi.output;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
unsigned long flags;
int r = 0;
DSSDBG("ENTER hdmi_display_enable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- if (!out->dispc_channel_connected) {
+ if (!dssdev->dispc_channel_connected) {
DSSERR("failed to enable display: no output/manager\n");
r = -ENODEV;
goto err0;
}
- r = hdmi_power_on_full(dssdev);
+ r = hdmi_power_on_full(hdmi);
if (r) {
DSSERR("failed to power on device\n");
goto err0;
}
- if (hdmi.audio_configured) {
- r = hdmi5_audio_config(&hdmi.core, &hdmi.wp, &hdmi.audio_config,
- hdmi.cfg.vm.pixelclock);
+ if (hdmi->audio_configured) {
+ r = hdmi5_audio_config(&hdmi->core, &hdmi->wp,
+ &hdmi->audio_config,
+ hdmi->cfg.vm.pixelclock);
if (r) {
DSSERR("Error restoring audio configuration: %d", r);
- hdmi.audio_abort_cb(&hdmi.pdev->dev);
- hdmi.audio_configured = false;
+ hdmi->audio_abort_cb(&hdmi->pdev->dev);
+ hdmi->audio_configured = false;
}
}
- spin_lock_irqsave(&hdmi.audio_playing_lock, flags);
- if (hdmi.audio_configured && hdmi.audio_playing)
- hdmi_start_audio_stream(&hdmi);
- hdmi.display_enabled = true;
- spin_unlock_irqrestore(&hdmi.audio_playing_lock, flags);
+ spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
+ if (hdmi->audio_configured && hdmi->audio_playing)
+ hdmi_start_audio_stream(hdmi);
+ hdmi->display_enabled = true;
+ spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return 0;
err0:
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return r;
}
static void hdmi_display_disable(struct omap_dss_device *dssdev)
{
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
unsigned long flags;
DSSDBG("Enter hdmi_display_disable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- spin_lock_irqsave(&hdmi.audio_playing_lock, flags);
- hdmi_stop_audio_stream(&hdmi);
- hdmi.display_enabled = false;
- spin_unlock_irqrestore(&hdmi.audio_playing_lock, flags);
+ spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
+ hdmi_stop_audio_stream(hdmi);
+ hdmi->display_enabled = false;
+ spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
- hdmi_power_off_full(dssdev);
+ hdmi_power_off_full(hdmi);
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
}
-static int hdmi_core_enable(struct omap_dss_device *dssdev)
+static int hdmi_core_enable(struct omap_hdmi *hdmi)
{
int r = 0;
DSSDBG("ENTER omapdss_hdmi_core_enable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- r = hdmi_power_on_core(dssdev);
+ r = hdmi_power_on_core(hdmi);
if (r) {
DSSERR("failed to power on device\n");
goto err0;
}
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return 0;
err0:
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
return r;
}
-static void hdmi_core_disable(struct omap_dss_device *dssdev)
+static void hdmi_core_disable(struct omap_hdmi *hdmi)
{
DSSDBG("Enter omapdss_hdmi_core_disable\n");
- mutex_lock(&hdmi.lock);
+ mutex_lock(&hdmi->lock);
- hdmi_power_off_core(dssdev);
+ hdmi_power_off_core(hdmi);
- mutex_unlock(&hdmi.lock);
+ mutex_unlock(&hdmi->lock);
}
static int hdmi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
int r;
- r = hdmi_init_regulator();
+ r = hdmi_init_regulator(hdmi);
if (r)
return r;
- r = dss_mgr_connect(channel, dssdev);
+ r = dss_mgr_connect(&hdmi->output, dssdev);
if (r)
return r;
@@ -470,7 +477,7 @@ static int hdmi_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dst->name);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&hdmi->output, dssdev);
return r;
}
@@ -480,7 +487,7 @@ static int hdmi_connect(struct omap_dss_device *dssdev,
static void hdmi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
WARN_ON(dst != dssdev->dst);
@@ -489,27 +496,28 @@ static void hdmi_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&hdmi->output, dssdev);
}
static int hdmi_read_edid(struct omap_dss_device *dssdev,
u8 *edid, int len)
{
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
bool need_enable;
int r;
- need_enable = hdmi.core_enabled == false;
+ need_enable = hdmi->core_enabled == false;
if (need_enable) {
- r = hdmi_core_enable(dssdev);
+ r = hdmi_core_enable(hdmi);
if (r)
return r;
}
- r = read_edid(edid, len);
+ r = read_edid(hdmi, edid, len);
if (need_enable)
- hdmi_core_disable(dssdev);
+ hdmi_core_disable(hdmi);
return r;
}
@@ -517,14 +525,18 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi)
{
- hdmi.cfg.infoframe = *avi;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ hdmi->cfg.infoframe = *avi;
return 0;
}
static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
bool hdmi_mode)
{
- hdmi.cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
+ struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+
+ hdmi->cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
return 0;
}
@@ -544,11 +556,11 @@ static const struct omapdss_hdmi_ops hdmi_ops = {
.set_hdmi_mode = hdmi_set_hdmi_mode,
};
-static void hdmi_init_output(struct platform_device *pdev)
+static void hdmi_init_output(struct omap_hdmi *hdmi)
{
- struct omap_dss_device *out = &hdmi.output;
+ struct omap_dss_device *out = &hdmi->output;
- out->dev = &pdev->dev;
+ out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
out->output_type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
@@ -559,15 +571,16 @@ static void hdmi_init_output(struct platform_device *pdev)
omapdss_register_output(out);
}
-static void hdmi_uninit_output(struct platform_device *pdev)
+static void hdmi_uninit_output(struct omap_hdmi *hdmi)
{
- struct omap_dss_device *out = &hdmi.output;
+ struct omap_dss_device *out = &hdmi->output;
omapdss_unregister_output(out);
}
-static int hdmi_probe_of(struct platform_device *pdev)
+static int hdmi_probe_of(struct omap_hdmi *hdmi)
{
+ struct platform_device *pdev = hdmi->pdev;
struct device_node *node = pdev->dev.of_node;
struct device_node *ep;
int r;
@@ -576,7 +589,7 @@ static int hdmi_probe_of(struct platform_device *pdev)
if (!ep)
return 0;
- r = hdmi_parse_lanes_of(pdev, ep, &hdmi.phy);
+ r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy);
if (r)
goto err;
@@ -593,21 +606,16 @@ static int hdmi_audio_startup(struct device *dev,
void (*abort_cb)(struct device *dev))
{
struct omap_hdmi *hd = dev_get_drvdata(dev);
- int ret = 0;
mutex_lock(&hd->lock);
- if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
- ret = -EPERM;
- goto out;
- }
+ WARN_ON(hd->audio_abort_cb != NULL);
hd->audio_abort_cb = abort_cb;
-out:
mutex_unlock(&hd->lock);
- return ret;
+ return 0;
}
static int hdmi_audio_shutdown(struct device *dev)
@@ -628,12 +636,14 @@ static int hdmi_audio_start(struct device *dev)
struct omap_hdmi *hd = dev_get_drvdata(dev);
unsigned long flags;
- WARN_ON(!hdmi_mode_has_audio(&hd->cfg));
-
spin_lock_irqsave(&hd->audio_playing_lock, flags);
- if (hd->display_enabled)
+ if (hd->display_enabled) {
+ if (!hdmi_mode_has_audio(&hd->cfg))
+ DSSERR("%s: Video mode does not support audio\n",
+ __func__);
hdmi_start_audio_stream(hd);
+ }
hd->audio_playing = true;
spin_unlock_irqrestore(&hd->audio_playing_lock, flags);
@@ -645,7 +655,8 @@ static void hdmi_audio_stop(struct device *dev)
struct omap_hdmi *hd = dev_get_drvdata(dev);
unsigned long flags;
- WARN_ON(!hdmi_mode_has_audio(&hd->cfg));
+ if (!hdmi_mode_has_audio(&hd->cfg))
+ DSSERR("%s: Video mode does not support audio\n", __func__);
spin_lock_irqsave(&hd->audio_playing_lock, flags);
@@ -664,18 +675,15 @@ static int hdmi_audio_config(struct device *dev,
mutex_lock(&hd->lock);
- if (!hdmi_mode_has_audio(&hd->cfg) || !hd->display_enabled) {
- ret = -EPERM;
- goto out;
+ if (hd->display_enabled) {
+ ret = hdmi5_audio_config(&hd->core, &hd->wp, dss_audio,
+ hd->cfg.vm.pixelclock);
+ if (ret)
+ goto out;
}
- ret = hdmi5_audio_config(&hd->core, &hd->wp, dss_audio,
- hd->cfg.vm.pixelclock);
-
- if (!ret) {
- hd->audio_configured = true;
- hd->audio_config = *dss_audio;
- }
+ hd->audio_configured = true;
+ hd->audio_config = *dss_audio;
out:
mutex_unlock(&hd->lock);
@@ -690,26 +698,26 @@ static const struct omap_hdmi_audio_ops hdmi_audio_ops = {
.audio_config = hdmi_audio_config,
};
-static int hdmi_audio_register(struct device *dev)
+static int hdmi_audio_register(struct omap_hdmi *hdmi)
{
struct omap_hdmi_audio_pdata pdata = {
- .dev = dev,
+ .dev = &hdmi->pdev->dev,
.version = 5,
- .audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi.wp),
+ .audio_dma_addr = hdmi_wp_get_audio_dma_addr(&hdmi->wp),
.ops = &hdmi_audio_ops,
};
- hdmi.audio_pdev = platform_device_register_data(
- dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO,
+ hdmi->audio_pdev = platform_device_register_data(
+ &hdmi->pdev->dev, "omap-hdmi-audio", PLATFORM_DEVID_AUTO,
&pdata, sizeof(pdata));
- if (IS_ERR(hdmi.audio_pdev))
- return PTR_ERR(hdmi.audio_pdev);
+ if (IS_ERR(hdmi->audio_pdev))
+ return PTR_ERR(hdmi->audio_pdev);
- hdmi_runtime_get();
- hdmi.wp_idlemode =
- REG_GET(hdmi.wp.base, HDMI_WP_SYSCONFIG, 3, 2);
- hdmi_runtime_put();
+ hdmi_runtime_get(hdmi);
+ hdmi->wp_idlemode =
+ REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
+ hdmi_runtime_put(hdmi);
return 0;
}
@@ -718,82 +726,97 @@ static int hdmi_audio_register(struct device *dev)
static int hdmi5_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
+ struct dss_device *dss = dss_get_device(master);
+ struct omap_hdmi *hdmi;
int r;
int irq;
- hdmi.pdev = pdev;
- dev_set_drvdata(&pdev->dev, &hdmi);
+ hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
- mutex_init(&hdmi.lock);
- spin_lock_init(&hdmi.audio_playing_lock);
+ hdmi->pdev = pdev;
+ hdmi->dss = dss;
+ dev_set_drvdata(&pdev->dev, hdmi);
- r = hdmi_probe_of(pdev);
+ mutex_init(&hdmi->lock);
+ spin_lock_init(&hdmi->audio_playing_lock);
+
+ r = hdmi_probe_of(hdmi);
if (r)
- return r;
+ goto err_free;
- r = hdmi_wp_init(pdev, &hdmi.wp, 5);
+ r = hdmi_wp_init(pdev, &hdmi->wp, 5);
if (r)
- return r;
+ goto err_free;
- r = hdmi_pll_init(pdev, &hdmi.pll, &hdmi.wp);
+ r = hdmi_pll_init(dss, pdev, &hdmi->pll, &hdmi->wp);
if (r)
- return r;
+ goto err_free;
- r = hdmi_phy_init(pdev, &hdmi.phy, 5);
+ r = hdmi_phy_init(pdev, &hdmi->phy, 5);
if (r)
- goto err;
+ goto err_pll;
- r = hdmi5_core_init(pdev, &hdmi.core);
+ r = hdmi5_core_init(pdev, &hdmi->core);
if (r)
- goto err;
+ goto err_pll;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto err;
+ goto err_pll;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
NULL, hdmi_irq_handler,
- IRQF_ONESHOT, "OMAP HDMI", &hdmi.wp);
+ IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err;
+ goto err_pll;
}
pm_runtime_enable(&pdev->dev);
- hdmi_init_output(pdev);
+ hdmi_init_output(hdmi);
- r = hdmi_audio_register(&pdev->dev);
+ r = hdmi_audio_register(hdmi);
if (r) {
DSSERR("Registering HDMI audio failed %d\n", r);
- hdmi_uninit_output(pdev);
+ hdmi_uninit_output(hdmi);
pm_runtime_disable(&pdev->dev);
return r;
}
- dss_debugfs_create_file("hdmi", hdmi_dump_regs);
+ hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
+ hdmi);
return 0;
-err:
- hdmi_pll_uninit(&hdmi.pll);
+
+err_pll:
+ hdmi_pll_uninit(&hdmi->pll);
+err_free:
+ kfree(hdmi);
return r;
}
static void hdmi5_unbind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dss_debugfs_remove_file(hdmi->debugfs);
- if (hdmi.audio_pdev)
- platform_device_unregister(hdmi.audio_pdev);
+ if (hdmi->audio_pdev)
+ platform_device_unregister(hdmi->audio_pdev);
- hdmi_uninit_output(pdev);
+ hdmi_uninit_output(hdmi);
- hdmi_pll_uninit(&hdmi.pll);
+ hdmi_pll_uninit(&hdmi->pll);
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_disable(dev);
+
+ kfree(hdmi);
}
static const struct component_ops hdmi5_component_ops = {
@@ -814,16 +837,19 @@ static int hdmi5_remove(struct platform_device *pdev)
static int hdmi_runtime_suspend(struct device *dev)
{
- dispc_runtime_put();
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dispc_runtime_put(hdmi->dss->dispc);
return 0;
}
static int hdmi_runtime_resume(struct device *dev)
{
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
int r;
- r = dispc_runtime_get();
+ r = dispc_runtime_get(hdmi->dss->dispc);
if (r < 0)
return r;
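
hdmi_dump_regs() in both hdmi4.c and hdmi5.c changes from void (*)(struct seq_file *) to the seq_file show prototype and pulls the instance out of s->private, since dss_debugfs_create_file() now takes the callback plus a private pointer and returns an entry that unbind removes. The same idea expressed with the plain kernel debugfs/seq_file API, as a hedged sketch (the foo_* names are made up):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

struct foo_device {
	struct dentry *debugfs;
	u32 ctrl;
};

static int foo_regs_show(struct seq_file *s, void *unused)
{
	struct foo_device *foo = s->private;	/* handle passed at registration time */

	seq_printf(s, "ctrl: 0x%08x\n", foo->ctrl);
	return 0;
}

static int foo_regs_open(struct inode *inode, struct file *file)
{
	/* single_open() stores inode->i_private (== foo) in s->private */
	return single_open(file, foo_regs_show, inode->i_private);
}

static const struct file_operations foo_regs_fops = {
	.owner		= THIS_MODULE,
	.open		= foo_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void foo_debugfs_init(struct foo_device *foo)
{
	foo->debugfs = debugfs_create_file("foo_regs", 0444, NULL, foo,
					   &foo_regs_fops);
}

static void foo_debugfs_remove(struct foo_device *foo)
{
	debugfs_remove(foo->debugfs);
}

Keeping the returned handle in the per-device structure, as the driver does with hdmi->debugfs, is what makes the teardown in unbind straightforward.
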
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 09759f8ea7bc..2282e48574c6 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -50,14 +50,14 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
- const unsigned ss_scl_high = 4600; /* ns */
- const unsigned ss_scl_low = 5400; /* ns */
- const unsigned fs_scl_high = 600; /* ns */
- const unsigned fs_scl_low = 1300; /* ns */
- const unsigned sda_hold = 1000; /* ns */
- const unsigned sfr_div = 10;
+ const unsigned int ss_scl_high = 4600; /* ns */
+ const unsigned int ss_scl_low = 5400; /* ns */
+ const unsigned int fs_scl_high = 600; /* ns */
+ const unsigned int fs_scl_low = 1300; /* ns */
+ const unsigned int sda_hold = 1000; /* ns */
+ const unsigned int sfr_div = 10;
unsigned long long sfr;
- unsigned v;
+ unsigned int v;
sfr = iclk / sfr_div; /* SFR_DIV */
sfr /= 1000; /* SFR clock in kHz */
@@ -430,11 +430,11 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core,
void __iomem *base = core->base;
u8 data[HDMI_INFOFRAME_SIZE(AVI)];
u8 *ptr;
- unsigned y, a, b, s;
- unsigned c, m, r;
- unsigned itc, ec, q, sc;
- unsigned vic;
- unsigned yq, cn, pr;
+ unsigned int y, a, b, s;
+ unsigned int c, m, r;
+ unsigned int itc, ec, q, sc;
+ unsigned int vic;
+ unsigned int yq, cn, pr;
hdmi_avi_infoframe_pack(frame, data, sizeof(data));
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
index 5c14ed851609..9915923a53bd 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
@@ -99,7 +99,7 @@ static void hdmi_phy_configure_lanes(struct hdmi_phy_data *phy)
u16 lane_cfg = 0;
int i;
- unsigned lane_cfg_val;
+ unsigned int lane_cfg_val;
u16 pol_val = 0;
for (i = 0; i < 4; ++i)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
index 08885d7de1e8..e7be3707d147 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
@@ -48,7 +48,7 @@ static int hdmi_pll_enable(struct dss_pll *dsspll)
r = pm_runtime_get_sync(&pll->pdev->dev);
WARN_ON(r < 0);
- dss_ctrl_pll_enable(DSS_PLL_HDMI, true);
+ dss_ctrl_pll_enable(dsspll, true);
r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
if (r)
@@ -65,7 +65,7 @@ static void hdmi_pll_disable(struct dss_pll *dsspll)
hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
- dss_ctrl_pll_enable(DSS_PLL_HDMI, false);
+ dss_ctrl_pll_enable(dsspll, false);
r = pm_runtime_put_sync(&pll->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
@@ -128,7 +128,8 @@ static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
.has_refsel = true,
};
-static int hdmi_init_pll_data(struct platform_device *pdev,
+static int hdmi_init_pll_data(struct dss_device *dss,
+ struct platform_device *pdev,
struct hdmi_pll_data *hpll)
{
struct dss_pll *pll = &hpll->pll;
@@ -153,15 +154,15 @@ static int hdmi_init_pll_data(struct platform_device *pdev,
pll->ops = &hdmi_pll_ops;
- r = dss_pll_register(pll);
+ r = dss_pll_register(dss, pll);
if (r)
return r;
return 0;
}
-int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
- struct hdmi_wp_data *wp)
+int hdmi_pll_init(struct dss_device *dss, struct platform_device *pdev,
+ struct hdmi_pll_data *pll, struct hdmi_wp_data *wp)
{
int r;
struct resource *res;
@@ -174,7 +175,7 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
if (IS_ERR(pll->base))
return PTR_ERR(pll->base);
- r = hdmi_init_pll_data(pdev, pll);
+ r = hdmi_init_pll_data(dss, pdev, pll);
if (r) {
DSSERR("failed to init HDMI PLL\n");
return r;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 806e5fdcfe52..53bc5f78050c 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -168,7 +168,7 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
{
u32 timing_h = 0;
u32 timing_v = 0;
- unsigned hsync_len_offset = 1;
+ unsigned int hsync_len_offset = 1;
DSSDBG("Enter hdmi_wp_video_config_timing\n");
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index f8f83e826a56..14d74adb13fb 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -59,7 +59,11 @@
#define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 29)
#define DISPC_IRQ_FRAMEDONE3 (1 << 30)
+struct dss_device;
+struct omap_drm_private;
struct omap_dss_device;
+struct dispc_device;
+struct dss_device;
struct dss_lcd_mgr_config;
struct snd_aes_iec958;
struct snd_cea_861_aud_if;
@@ -159,21 +163,6 @@ enum omap_overlay_caps {
OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5,
};
-enum omap_dss_clk_source {
- OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK
- * OMAP4: DSS_FCLK */
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
- * OMAP4: PLL1_CLK1 */
- OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
- * OMAP4: PLL1_CLK2 */
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */
- OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */
-};
-
-enum omap_hdmi_flags {
- OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0,
-};
-
enum omap_dss_output_id {
OMAP_DSS_OUTPUT_DPI = 1 << 0,
OMAP_DSS_OUTPUT_DBI = 1 << 1,
@@ -198,8 +187,8 @@ enum omap_dss_dsi_trans_mode {
struct omap_dss_dsi_videomode_timings {
unsigned long hsclk;
- unsigned ndl;
- unsigned bitspp;
+ unsigned int ndl;
+ unsigned int bitspp;
/* pixels */
u16 hact;
@@ -585,7 +574,12 @@ struct omap_dss_driver {
const struct hdmi_avi_infoframe *avi);
};
-bool omapdss_is_initialized(void);
+struct dss_device *omapdss_get_dss(void);
+void omapdss_set_dss(struct dss_device *dss);
+static inline bool omapdss_is_initialized(void)
+{
+ return !!omapdss_get_dss();
+}
int omapdss_register_display(struct omap_dss_device *dssdev);
void omapdss_unregister_display(struct omap_dss_device *dssdev);
@@ -609,9 +603,6 @@ int omapdss_output_unset_device(struct omap_dss_device *out);
struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev);
-void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm);
-
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
@@ -632,97 +623,139 @@ static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
struct omap_dss_device *
omapdss_of_find_source_for_first_ep(struct device_node *node);
-void omapdss_set_is_initialized(bool set);
-
struct device_node *dss_of_port_get_parent_device(struct device_node *port);
u32 dss_of_port_get_port_number(struct device_node *port);
+enum dss_writeback_channel {
+ DSS_WB_LCD1_MGR = 0,
+ DSS_WB_LCD2_MGR = 1,
+ DSS_WB_TV_MGR = 2,
+ DSS_WB_OVL0 = 3,
+ DSS_WB_OVL1 = 4,
+ DSS_WB_OVL2 = 5,
+ DSS_WB_OVL3 = 6,
+ DSS_WB_LCD3_MGR = 7,
+};
+
struct dss_mgr_ops {
- int (*connect)(enum omap_channel channel,
- struct omap_dss_device *dst);
- void (*disconnect)(enum omap_channel channel,
- struct omap_dss_device *dst);
-
- void (*start_update)(enum omap_channel channel);
- int (*enable)(enum omap_channel channel);
- void (*disable)(enum omap_channel channel);
- void (*set_timings)(enum omap_channel channel,
- const struct videomode *vm);
- void (*set_lcd_config)(enum omap_channel channel,
- const struct dss_lcd_mgr_config *config);
- int (*register_framedone_handler)(enum omap_channel channel,
+ int (*connect)(struct omap_drm_private *priv,
+ enum omap_channel channel,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_drm_private *priv,
+ enum omap_channel channel,
+ struct omap_dss_device *dst);
+
+ void (*start_update)(struct omap_drm_private *priv,
+ enum omap_channel channel);
+ int (*enable)(struct omap_drm_private *priv,
+ enum omap_channel channel);
+ void (*disable)(struct omap_drm_private *priv,
+ enum omap_channel channel);
+ void (*set_timings)(struct omap_drm_private *priv,
+ enum omap_channel channel,
+ const struct videomode *vm);
+ void (*set_lcd_config)(struct omap_drm_private *priv,
+ enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config);
+ int (*register_framedone_handler)(struct omap_drm_private *priv,
+ enum omap_channel channel,
void (*handler)(void *), void *data);
- void (*unregister_framedone_handler)(enum omap_channel channel,
+ void (*unregister_framedone_handler)(struct omap_drm_private *priv,
+ enum omap_channel channel,
void (*handler)(void *), void *data);
};
-int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops);
+int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops,
+ struct omap_drm_private *priv);
void dss_uninstall_mgr_ops(void);
-int dss_mgr_connect(enum omap_channel channel,
- struct omap_dss_device *dst);
-void dss_mgr_disconnect(enum omap_channel channel,
- struct omap_dss_device *dst);
-void dss_mgr_set_timings(enum omap_channel channel,
+int dss_mgr_connect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+void dss_mgr_disconnect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+void dss_mgr_set_timings(struct omap_dss_device *dssdev,
const struct videomode *vm);
-void dss_mgr_set_lcd_config(enum omap_channel channel,
+void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev,
const struct dss_lcd_mgr_config *config);
-int dss_mgr_enable(enum omap_channel channel);
-void dss_mgr_disable(enum omap_channel channel);
-void dss_mgr_start_update(enum omap_channel channel);
-int dss_mgr_register_framedone_handler(enum omap_channel channel,
+int dss_mgr_enable(struct omap_dss_device *dssdev);
+void dss_mgr_disable(struct omap_dss_device *dssdev);
+void dss_mgr_start_update(struct omap_dss_device *dssdev);
+int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data);
-void dss_mgr_unregister_framedone_handler(enum omap_channel channel,
+void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data);
/* dispc ops */
struct dispc_ops {
- u32 (*read_irqstatus)(void);
- void (*clear_irqstatus)(u32 mask);
- void (*write_irqenable)(u32 mask);
-
- int (*request_irq)(irq_handler_t handler, void *dev_id);
- void (*free_irq)(void *dev_id);
-
- int (*runtime_get)(void);
- void (*runtime_put)(void);
-
- int (*get_num_ovls)(void);
- int (*get_num_mgrs)(void);
-
- u32 (*get_memory_bandwidth_limit)(void);
-
- void (*mgr_enable)(enum omap_channel channel, bool enable);
- bool (*mgr_is_enabled)(enum omap_channel channel);
- u32 (*mgr_get_vsync_irq)(enum omap_channel channel);
- u32 (*mgr_get_framedone_irq)(enum omap_channel channel);
- u32 (*mgr_get_sync_lost_irq)(enum omap_channel channel);
- bool (*mgr_go_busy)(enum omap_channel channel);
- void (*mgr_go)(enum omap_channel channel);
- void (*mgr_set_lcd_config)(enum omap_channel channel,
- const struct dss_lcd_mgr_config *config);
- void (*mgr_set_timings)(enum omap_channel channel,
- const struct videomode *vm);
- void (*mgr_setup)(enum omap_channel channel,
- const struct omap_overlay_manager_info *info);
- enum omap_dss_output_id (*mgr_get_supported_outputs)(enum omap_channel channel);
- u32 (*mgr_gamma_size)(enum omap_channel channel);
- void (*mgr_set_gamma)(enum omap_channel channel,
- const struct drm_color_lut *lut,
- unsigned int length);
-
- int (*ovl_enable)(enum omap_plane_id plane, bool enable);
- int (*ovl_setup)(enum omap_plane_id plane,
+ u32 (*read_irqstatus)(struct dispc_device *dispc);
+ void (*clear_irqstatus)(struct dispc_device *dispc, u32 mask);
+ void (*write_irqenable)(struct dispc_device *dispc, u32 mask);
+
+ int (*request_irq)(struct dispc_device *dispc, irq_handler_t handler,
+ void *dev_id);
+ void (*free_irq)(struct dispc_device *dispc, void *dev_id);
+
+ int (*runtime_get)(struct dispc_device *dispc);
+ void (*runtime_put)(struct dispc_device *dispc);
+
+ int (*get_num_ovls)(struct dispc_device *dispc);
+ int (*get_num_mgrs)(struct dispc_device *dispc);
+
+ u32 (*get_memory_bandwidth_limit)(struct dispc_device *dispc);
+
+ void (*mgr_enable)(struct dispc_device *dispc,
+ enum omap_channel channel, bool enable);
+ bool (*mgr_is_enabled)(struct dispc_device *dispc,
+ enum omap_channel channel);
+ u32 (*mgr_get_vsync_irq)(struct dispc_device *dispc,
+ enum omap_channel channel);
+ u32 (*mgr_get_framedone_irq)(struct dispc_device *dispc,
+ enum omap_channel channel);
+ u32 (*mgr_get_sync_lost_irq)(struct dispc_device *dispc,
+ enum omap_channel channel);
+ bool (*mgr_go_busy)(struct dispc_device *dispc,
+ enum omap_channel channel);
+ void (*mgr_go)(struct dispc_device *dispc, enum omap_channel channel);
+ void (*mgr_set_lcd_config)(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct dss_lcd_mgr_config *config);
+ void (*mgr_set_timings)(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm);
+ void (*mgr_setup)(struct dispc_device *dispc, enum omap_channel channel,
+ const struct omap_overlay_manager_info *info);
+ enum omap_dss_output_id (*mgr_get_supported_outputs)(
+ struct dispc_device *dispc, enum omap_channel channel);
+ u32 (*mgr_gamma_size)(struct dispc_device *dispc,
+ enum omap_channel channel);
+ void (*mgr_set_gamma)(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct drm_color_lut *lut,
+ unsigned int length);
+
+ int (*ovl_enable)(struct dispc_device *dispc, enum omap_plane_id plane,
+ bool enable);
+ int (*ovl_setup)(struct dispc_device *dispc, enum omap_plane_id plane,
const struct omap_overlay_info *oi,
- const struct videomode *vm, bool mem_to_mem,
- enum omap_channel channel);
+ const struct videomode *vm, bool mem_to_mem,
+ enum omap_channel channel);
+
+ const u32 *(*ovl_get_color_modes)(struct dispc_device *dispc,
+ enum omap_plane_id plane);
- const u32 *(*ovl_get_color_modes)(enum omap_plane_id plane);
+ u32 (*wb_get_framedone_irq)(struct dispc_device *dispc);
+ int (*wb_setup)(struct dispc_device *dispc,
+ const struct omap_dss_writeback_info *wi,
+ bool mem_to_mem, const struct videomode *vm,
+ enum dss_writeback_channel channel_in);
+ bool (*has_writeback)(struct dispc_device *dispc);
+ bool (*wb_go_busy)(struct dispc_device *dispc);
+ void (*wb_go)(struct dispc_device *dispc);
};
-void dispc_set_ops(const struct dispc_ops *o);
-const struct dispc_ops *dispc_get_ops(void);
+struct dispc_device *dispc_get_dispc(struct dss_device *dss);
+const struct dispc_ops *dispc_get_ops(struct dss_device *dss);
bool omapdss_component_is_display(struct device_node *node);
bool omapdss_component_is_output(struct device_node *node);
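The dispc_ops hunks above replace implicit file-scope state with an explicit struct dispc_device * handed to every callback. A minimal stand-alone sketch of that context-pointer pattern follows; the ctx_/demo_ names are hypothetical and not taken from the patch.

#include <stdio.h>

struct ctx_device {
	int num_overlays;	/* stands in for real hardware state */
};

struct ctx_ops {
	int (*get_num_ovls)(struct ctx_device *ctx);
};

static int ctx_get_num_ovls(struct ctx_device *ctx)
{
	return ctx->num_overlays;	/* no file-scope global needed */
}

static const struct ctx_ops demo_ops = {
	.get_num_ovls = ctx_get_num_ovls,
};

int main(void)
{
	struct ctx_device a = { .num_overlays = 4 };
	struct ctx_device b = { .num_overlays = 2 };

	/* two instances coexist once all state travels through the argument */
	printf("%d %d\n", demo_ops.get_num_ovls(&a), demo_ops.get_num_ovls(&b));
	return 0;
}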
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index b9afd80ae385..96b9d4cd505f 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -156,7 +156,6 @@ struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *po
return NULL;
}
-EXPORT_SYMBOL(omap_dss_find_output_by_port_node);
struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev)
{
@@ -171,13 +170,16 @@ struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device
EXPORT_SYMBOL(omapdss_find_output_from_display);
static const struct dss_mgr_ops *dss_mgr_ops;
+static struct omap_drm_private *dss_mgr_ops_priv;
-int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops)
+int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops,
+ struct omap_drm_private *priv)
{
if (dss_mgr_ops)
return -EBUSY;
dss_mgr_ops = mgr_ops;
+ dss_mgr_ops_priv = priv;
return 0;
}
@@ -186,64 +188,71 @@ EXPORT_SYMBOL(dss_install_mgr_ops);
void dss_uninstall_mgr_ops(void)
{
dss_mgr_ops = NULL;
+ dss_mgr_ops_priv = NULL;
}
EXPORT_SYMBOL(dss_uninstall_mgr_ops);
-int dss_mgr_connect(enum omap_channel channel,
- struct omap_dss_device *dst)
+int dss_mgr_connect(struct omap_dss_device *dssdev, struct omap_dss_device *dst)
{
- return dss_mgr_ops->connect(channel, dst);
+ return dss_mgr_ops->connect(dss_mgr_ops_priv,
+ dssdev->dispc_channel, dst);
}
EXPORT_SYMBOL(dss_mgr_connect);
-void dss_mgr_disconnect(enum omap_channel channel,
- struct omap_dss_device *dst)
+void dss_mgr_disconnect(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst)
{
- dss_mgr_ops->disconnect(channel, dst);
+ dss_mgr_ops->disconnect(dss_mgr_ops_priv, dssdev->dispc_channel, dst);
}
EXPORT_SYMBOL(dss_mgr_disconnect);
-void dss_mgr_set_timings(enum omap_channel channel, const struct videomode *vm)
+void dss_mgr_set_timings(struct omap_dss_device *dssdev,
+ const struct videomode *vm)
{
- dss_mgr_ops->set_timings(channel, vm);
+ dss_mgr_ops->set_timings(dss_mgr_ops_priv, dssdev->dispc_channel, vm);
}
EXPORT_SYMBOL(dss_mgr_set_timings);
-void dss_mgr_set_lcd_config(enum omap_channel channel,
+void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev,
const struct dss_lcd_mgr_config *config)
{
- dss_mgr_ops->set_lcd_config(channel, config);
+ dss_mgr_ops->set_lcd_config(dss_mgr_ops_priv,
+ dssdev->dispc_channel, config);
}
EXPORT_SYMBOL(dss_mgr_set_lcd_config);
-int dss_mgr_enable(enum omap_channel channel)
+int dss_mgr_enable(struct omap_dss_device *dssdev)
{
- return dss_mgr_ops->enable(channel);
+ return dss_mgr_ops->enable(dss_mgr_ops_priv, dssdev->dispc_channel);
}
EXPORT_SYMBOL(dss_mgr_enable);
-void dss_mgr_disable(enum omap_channel channel)
+void dss_mgr_disable(struct omap_dss_device *dssdev)
{
- dss_mgr_ops->disable(channel);
+ dss_mgr_ops->disable(dss_mgr_ops_priv, dssdev->dispc_channel);
}
EXPORT_SYMBOL(dss_mgr_disable);
-void dss_mgr_start_update(enum omap_channel channel)
+void dss_mgr_start_update(struct omap_dss_device *dssdev)
{
- dss_mgr_ops->start_update(channel);
+ dss_mgr_ops->start_update(dss_mgr_ops_priv, dssdev->dispc_channel);
}
EXPORT_SYMBOL(dss_mgr_start_update);
-int dss_mgr_register_framedone_handler(enum omap_channel channel,
+int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
- return dss_mgr_ops->register_framedone_handler(channel, handler, data);
+ return dss_mgr_ops->register_framedone_handler(dss_mgr_ops_priv,
+ dssdev->dispc_channel,
+ handler, data);
}
EXPORT_SYMBOL(dss_mgr_register_framedone_handler);
-void dss_mgr_unregister_framedone_handler(enum omap_channel channel,
+void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
- dss_mgr_ops->unregister_framedone_handler(channel, handler, data);
+ dss_mgr_ops->unregister_framedone_handler(dss_mgr_ops_priv,
+ dssdev->dispc_channel,
+ handler, data);
}
EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler);
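output.c now stores the installed dss_mgr_ops next to a private pointer and lets thin wrappers supply it, so callers only pass a display device. A rough, self-contained sketch of that shape, with hypothetical demo_ names:

#include <stdio.h>

struct demo_priv { int channel_count; };

struct demo_mgr_ops {
	int (*enable)(struct demo_priv *priv, int channel);
};

static const struct demo_mgr_ops *demo_ops;
static struct demo_priv *demo_ops_priv;

/* remember both the ops table and the context it expects */
int demo_install_mgr_ops(const struct demo_mgr_ops *ops, struct demo_priv *priv)
{
	if (demo_ops)
		return -1;	/* already installed, mirrors -EBUSY */
	demo_ops = ops;
	demo_ops_priv = priv;
	return 0;
}

/* callers never see the private pointer; the wrapper supplies it */
int demo_mgr_enable(int channel)
{
	return demo_ops->enable(demo_ops_priv, channel);
}

static int demo_enable(struct demo_priv *priv, int channel)
{
	printf("enable channel %d of %d\n", channel, priv->channel_count);
	return 0;
}

int main(void)
{
	static const struct demo_mgr_ops ops = { .enable = demo_enable };
	static struct demo_priv priv = { .channel_count = 3 };

	demo_install_mgr_ops(&ops, &priv);
	return demo_mgr_enable(1);
}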
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index 058714b1eb56..078b0e8216c3 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -35,15 +35,14 @@
#define PLL_SSC_CONFIGURATION2 0x001C
#define PLL_CONFIGURATION4 0x0020
-static struct dss_pll *dss_plls[4];
-
-int dss_pll_register(struct dss_pll *pll)
+int dss_pll_register(struct dss_device *dss, struct dss_pll *pll)
{
int i;
- for (i = 0; i < ARRAY_SIZE(dss_plls); ++i) {
- if (!dss_plls[i]) {
- dss_plls[i] = pll;
+ for (i = 0; i < ARRAY_SIZE(dss->plls); ++i) {
+ if (!dss->plls[i]) {
+ dss->plls[i] = pll;
+ pll->dss = dss;
return 0;
}
}
@@ -53,29 +52,32 @@ int dss_pll_register(struct dss_pll *pll)
void dss_pll_unregister(struct dss_pll *pll)
{
+ struct dss_device *dss = pll->dss;
int i;
- for (i = 0; i < ARRAY_SIZE(dss_plls); ++i) {
- if (dss_plls[i] == pll) {
- dss_plls[i] = NULL;
+ for (i = 0; i < ARRAY_SIZE(dss->plls); ++i) {
+ if (dss->plls[i] == pll) {
+ dss->plls[i] = NULL;
+ pll->dss = NULL;
return;
}
}
}
-struct dss_pll *dss_pll_find(const char *name)
+struct dss_pll *dss_pll_find(struct dss_device *dss, const char *name)
{
int i;
- for (i = 0; i < ARRAY_SIZE(dss_plls); ++i) {
- if (dss_plls[i] && strcmp(dss_plls[i]->name, name) == 0)
- return dss_plls[i];
+ for (i = 0; i < ARRAY_SIZE(dss->plls); ++i) {
+ if (dss->plls[i] && strcmp(dss->plls[i]->name, name) == 0)
+ return dss->plls[i];
}
return NULL;
}
-struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src)
+struct dss_pll *dss_pll_find_by_src(struct dss_device *dss,
+ enum dss_clk_source src)
{
struct dss_pll *pll;
@@ -85,27 +87,27 @@ struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src)
return NULL;
case DSS_CLK_SRC_HDMI_PLL:
- return dss_pll_find("hdmi");
+ return dss_pll_find(dss, "hdmi");
case DSS_CLK_SRC_PLL1_1:
case DSS_CLK_SRC_PLL1_2:
case DSS_CLK_SRC_PLL1_3:
- pll = dss_pll_find("dsi0");
+ pll = dss_pll_find(dss, "dsi0");
if (!pll)
- pll = dss_pll_find("video0");
+ pll = dss_pll_find(dss, "video0");
return pll;
case DSS_CLK_SRC_PLL2_1:
case DSS_CLK_SRC_PLL2_2:
case DSS_CLK_SRC_PLL2_3:
- pll = dss_pll_find("dsi1");
+ pll = dss_pll_find(dss, "dsi1");
if (!pll)
- pll = dss_pll_find("video1");
+ pll = dss_pll_find(dss, "video1");
return pll;
}
}
-unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src)
+unsigned int dss_pll_get_clkout_idx_for_src(enum dss_clk_source src)
{
switch (src) {
case DSS_CLK_SRC_HDMI_PLL:
@@ -277,7 +279,7 @@ bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
unsigned long fint, clkdco, clkout;
unsigned long target_clkdco;
unsigned long min_dco;
- unsigned n, m, mf, m2, sd;
+ unsigned int n, m, mf, m2, sd;
const struct dss_pll_hw *hw = pll->hw;
DSSDBG("clkin %lu, target clkout %lu\n", clkin, target_clkout);
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index d8ab31f3a813..68a40ae26f5b 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -29,8 +29,9 @@
#include "omapdss.h"
#include "dss.h"
-static struct {
+struct sdi_device {
struct platform_device *pdev;
+ struct dss_device *dss;
bool update_enabled;
struct regulator *vdds_sdi_reg;
@@ -40,11 +41,12 @@ static struct {
int datapairs;
struct omap_dss_device output;
+};
- bool port_initialized;
-} sdi;
+#define dssdev_to_sdi(dssdev) container_of(dssdev, struct sdi_device, output)
struct sdi_clk_calc_ctx {
+ struct sdi_device *sdi;
unsigned long pck_min, pck_max;
unsigned long fck;
@@ -70,16 +72,17 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
ctx->fck = fck;
- return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max,
- dpi_calc_dispc_cb, ctx);
+ return dispc_div_calc(ctx->sdi->dss->dispc, fck,
+ ctx->pck_min, ctx->pck_max,
+ dpi_calc_dispc_cb, ctx);
}
-static int sdi_calc_clock_div(unsigned long pclk,
- unsigned long *fck,
- struct dispc_clock_info *dispc_cinfo)
+static int sdi_calc_clock_div(struct sdi_device *sdi, unsigned long pclk,
+ unsigned long *fck,
+ struct dispc_clock_info *dispc_cinfo)
{
int i;
- struct sdi_clk_calc_ctx ctx;
+ struct sdi_clk_calc_ctx ctx = { .sdi = sdi };
/*
* DSS fclk gives us very few possibilities, so finding a good pixel
@@ -98,7 +101,8 @@ static int sdi_calc_clock_div(unsigned long pclk,
ctx.pck_min = 0;
ctx.pck_max = pclk + 1000 * i * i * i;
- ok = dss_div_calc(pclk, ctx.pck_min, dpi_calc_dss_cb, &ctx);
+ ok = dss_div_calc(sdi->dss, pclk, ctx.pck_min,
+ dpi_calc_dss_cb, &ctx);
if (ok) {
*fck = ctx.fck;
*dispc_cinfo = ctx.dispc_cinfo;
@@ -109,52 +113,49 @@ static int sdi_calc_clock_div(unsigned long pclk,
return -EINVAL;
}
-static void sdi_config_lcd_manager(struct omap_dss_device *dssdev)
+static void sdi_config_lcd_manager(struct sdi_device *sdi)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ sdi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
- sdi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
+ sdi->mgr_config.stallmode = false;
+ sdi->mgr_config.fifohandcheck = false;
- sdi.mgr_config.stallmode = false;
- sdi.mgr_config.fifohandcheck = false;
+ sdi->mgr_config.video_port_width = 24;
+ sdi->mgr_config.lcden_sig_polarity = 1;
- sdi.mgr_config.video_port_width = 24;
- sdi.mgr_config.lcden_sig_polarity = 1;
-
- dss_mgr_set_lcd_config(channel, &sdi.mgr_config);
+ dss_mgr_set_lcd_config(&sdi->output, &sdi->mgr_config);
}
static int sdi_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_device *out = &sdi.output;
- enum omap_channel channel = dssdev->dispc_channel;
- struct videomode *vm = &sdi.vm;
+ struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+ struct videomode *vm = &sdi->vm;
unsigned long fck;
struct dispc_clock_info dispc_cinfo;
unsigned long pck;
int r;
- if (!out->dispc_channel_connected) {
+ if (!sdi->output.dispc_channel_connected) {
DSSERR("failed to enable display: no output/manager\n");
return -ENODEV;
}
- r = regulator_enable(sdi.vdds_sdi_reg);
+ r = regulator_enable(sdi->vdds_sdi_reg);
if (r)
goto err_reg_enable;
- r = dispc_runtime_get();
+ r = dispc_runtime_get(sdi->dss->dispc);
if (r)
goto err_get_dispc;
/* 15.5.9.1.2 */
vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_SYNC_POSEDGE;
- r = sdi_calc_clock_div(vm->pixelclock, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
- sdi.mgr_config.clock_info = dispc_cinfo;
+ sdi->mgr_config.clock_info = dispc_cinfo;
pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
@@ -166,13 +167,13 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
}
- dss_mgr_set_timings(channel, vm);
+ dss_mgr_set_timings(&sdi->output, vm);
- r = dss_set_fck_rate(fck);
+ r = dss_set_fck_rate(sdi->dss, fck);
if (r)
goto err_set_dss_clock_div;
- sdi_config_lcd_manager(dssdev);
+ sdi_config_lcd_manager(sdi);
/*
* LCLK and PCLK divisors are located in shadow registers, and we
@@ -185,63 +186,69 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
* need to care about the shadow register mechanism for pck-free. The
* exact reason for this is unknown.
*/
- dispc_mgr_set_clock_div(channel, &sdi.mgr_config.clock_info);
+ dispc_mgr_set_clock_div(sdi->dss->dispc, sdi->output.dispc_channel,
+ &sdi->mgr_config.clock_info);
- dss_sdi_init(sdi.datapairs);
- r = dss_sdi_enable();
+ dss_sdi_init(sdi->dss, sdi->datapairs);
+ r = dss_sdi_enable(sdi->dss);
if (r)
goto err_sdi_enable;
mdelay(2);
- r = dss_mgr_enable(channel);
+ r = dss_mgr_enable(&sdi->output);
if (r)
goto err_mgr_enable;
return 0;
err_mgr_enable:
- dss_sdi_disable();
+ dss_sdi_disable(sdi->dss);
err_sdi_enable:
err_set_dss_clock_div:
err_calc_clock_div:
- dispc_runtime_put();
+ dispc_runtime_put(sdi->dss->dispc);
err_get_dispc:
- regulator_disable(sdi.vdds_sdi_reg);
+ regulator_disable(sdi->vdds_sdi_reg);
err_reg_enable:
return r;
}
static void sdi_display_disable(struct omap_dss_device *dssdev)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct sdi_device *sdi = dssdev_to_sdi(dssdev);
- dss_mgr_disable(channel);
+ dss_mgr_disable(&sdi->output);
- dss_sdi_disable();
+ dss_sdi_disable(sdi->dss);
- dispc_runtime_put();
+ dispc_runtime_put(sdi->dss->dispc);
- regulator_disable(sdi.vdds_sdi_reg);
+ regulator_disable(sdi->vdds_sdi_reg);
}
static void sdi_set_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- sdi.vm = *vm;
+ struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+
+ sdi->vm = *vm;
}
static void sdi_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- *vm = sdi.vm;
+ struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+
+ *vm = sdi->vm;
}
static int sdi_check_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
+ struct sdi_device *sdi = dssdev_to_sdi(dssdev);
enum omap_channel channel = dssdev->dispc_channel;
- if (!dispc_mgr_timings_ok(channel, vm))
+ if (!dispc_mgr_timings_ok(sdi->dss->dispc, channel, vm))
return -EINVAL;
if (vm->pixelclock == 0)
@@ -250,21 +257,21 @@ static int sdi_check_timings(struct omap_dss_device *dssdev,
return 0;
}
-static int sdi_init_regulator(void)
+static int sdi_init_regulator(struct sdi_device *sdi)
{
struct regulator *vdds_sdi;
- if (sdi.vdds_sdi_reg)
+ if (sdi->vdds_sdi_reg)
return 0;
- vdds_sdi = devm_regulator_get(&sdi.pdev->dev, "vdds_sdi");
+ vdds_sdi = devm_regulator_get(&sdi->pdev->dev, "vdds_sdi");
if (IS_ERR(vdds_sdi)) {
if (PTR_ERR(vdds_sdi) != -EPROBE_DEFER)
DSSERR("can't get VDDS_SDI regulator\n");
return PTR_ERR(vdds_sdi);
}
- sdi.vdds_sdi_reg = vdds_sdi;
+ sdi->vdds_sdi_reg = vdds_sdi;
return 0;
}
@@ -272,14 +279,14 @@ static int sdi_init_regulator(void)
static int sdi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct sdi_device *sdi = dssdev_to_sdi(dssdev);
int r;
- r = sdi_init_regulator();
+ r = sdi_init_regulator(sdi);
if (r)
return r;
- r = dss_mgr_connect(channel, dssdev);
+ r = dss_mgr_connect(&sdi->output, dssdev);
if (r)
return r;
@@ -287,7 +294,7 @@ static int sdi_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dst->name);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&sdi->output, dssdev);
return r;
}
@@ -297,7 +304,7 @@ static int sdi_connect(struct omap_dss_device *dssdev,
static void sdi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct sdi_device *sdi = dssdev_to_sdi(dssdev);
WARN_ON(dst != dssdev->dst);
@@ -306,7 +313,7 @@ static void sdi_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&sdi->output, dssdev);
}
static const struct omapdss_sdi_ops sdi_ops = {
@@ -321,11 +328,11 @@ static const struct omapdss_sdi_ops sdi_ops = {
.get_timings = sdi_get_timings,
};
-static void sdi_init_output(struct platform_device *pdev)
+static void sdi_init_output(struct sdi_device *sdi)
{
- struct omap_dss_device *out = &sdi.output;
+ struct omap_dss_device *out = &sdi->output;
- out->dev = &pdev->dev;
+ out->dev = &sdi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_SDI;
out->output_type = OMAP_DISPLAY_TYPE_SDI;
out->name = "sdi.0";
@@ -338,22 +345,28 @@ static void sdi_init_output(struct platform_device *pdev)
omapdss_register_output(out);
}
-static void sdi_uninit_output(struct platform_device *pdev)
+static void sdi_uninit_output(struct sdi_device *sdi)
{
- struct omap_dss_device *out = &sdi.output;
-
- omapdss_unregister_output(out);
+ omapdss_unregister_output(&sdi->output);
}
-int sdi_init_port(struct platform_device *pdev, struct device_node *port)
+int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
+ struct device_node *port)
{
+ struct sdi_device *sdi;
struct device_node *ep;
u32 datapairs;
int r;
+ sdi = kzalloc(sizeof(*sdi), GFP_KERNEL);
+ if (!sdi)
+ return -ENOMEM;
+
ep = of_get_next_child(port, NULL);
- if (!ep)
- return 0;
+ if (!ep) {
+ r = 0;
+ goto err_free;
+ }
r = of_property_read_u32(ep, "datapairs", &datapairs);
if (r) {
@@ -361,28 +374,33 @@ int sdi_init_port(struct platform_device *pdev, struct device_node *port)
goto err_datapairs;
}
- sdi.datapairs = datapairs;
+ sdi->datapairs = datapairs;
+ sdi->dss = dss;
of_node_put(ep);
- sdi.pdev = pdev;
-
- sdi_init_output(pdev);
+ sdi->pdev = pdev;
+ port->data = sdi;
- sdi.port_initialized = true;
+ sdi_init_output(sdi);
return 0;
err_datapairs:
of_node_put(ep);
+err_free:
+ kfree(sdi);
return r;
}
void sdi_uninit_port(struct device_node *port)
{
- if (!sdi.port_initialized)
+ struct sdi_device *sdi = port->data;
+
+ if (!sdi)
return;
- sdi_uninit_output(sdi.pdev);
+ sdi_uninit_output(sdi);
+ kfree(sdi);
}
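sdi.c now embeds the output inside a heap-allocated sdi_device and recovers the container with the dssdev_to_sdi() macro. The container_of() idiom in isolation, as a user-space sketch with hypothetical names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_output { const char *name; };

struct demo_device {
	int datapairs;
	struct demo_output output;	/* embedded, not pointed-to */
};

#define output_to_demo(out) container_of(out, struct demo_device, output)

int main(void)
{
	struct demo_device dev = { .datapairs = 3, .output = { "sdi.0" } };
	struct demo_output *out = &dev.output;

	/* walk back from the member to its enclosing structure */
	printf("%s has %d datapairs\n", out->name,
	       output_to_demo(out)->datapairs);
	return 0;
}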
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 6de9d734ddb9..24d1ced210bd 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -319,12 +319,15 @@ static enum venc_videomode venc_get_videomode(const struct videomode *vm)
return VENC_MODE_UNKNOWN;
}
-static struct {
+struct venc_device {
struct platform_device *pdev;
void __iomem *base;
struct mutex venc_lock;
u32 wss_data;
struct regulator *vdda_dac_reg;
+ struct dss_device *dss;
+
+ struct dss_debugfs_entry *debugfs;
struct clk *tv_dac_clk;
@@ -334,81 +337,87 @@ static struct {
bool requires_tv_dac_clk;
struct omap_dss_device output;
-} venc;
+};
+
+#define dssdev_to_venc(dssdev) container_of(dssdev, struct venc_device, output)
-static inline void venc_write_reg(int idx, u32 val)
+static inline void venc_write_reg(struct venc_device *venc, int idx, u32 val)
{
- __raw_writel(val, venc.base + idx);
+ __raw_writel(val, venc->base + idx);
}
-static inline u32 venc_read_reg(int idx)
+static inline u32 venc_read_reg(struct venc_device *venc, int idx)
{
- u32 l = __raw_readl(venc.base + idx);
+ u32 l = __raw_readl(venc->base + idx);
return l;
}
-static void venc_write_config(const struct venc_config *config)
+static void venc_write_config(struct venc_device *venc,
+ const struct venc_config *config)
{
DSSDBG("write venc conf\n");
- venc_write_reg(VENC_LLEN, config->llen);
- venc_write_reg(VENC_FLENS, config->flens);
- venc_write_reg(VENC_CC_CARR_WSS_CARR, config->cc_carr_wss_carr);
- venc_write_reg(VENC_C_PHASE, config->c_phase);
- venc_write_reg(VENC_GAIN_U, config->gain_u);
- venc_write_reg(VENC_GAIN_V, config->gain_v);
- venc_write_reg(VENC_GAIN_Y, config->gain_y);
- venc_write_reg(VENC_BLACK_LEVEL, config->black_level);
- venc_write_reg(VENC_BLANK_LEVEL, config->blank_level);
- venc_write_reg(VENC_M_CONTROL, config->m_control);
- venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
- venc.wss_data);
- venc_write_reg(VENC_S_CARR, config->s_carr);
- venc_write_reg(VENC_L21__WC_CTL, config->l21__wc_ctl);
- venc_write_reg(VENC_SAVID__EAVID, config->savid__eavid);
- venc_write_reg(VENC_FLEN__FAL, config->flen__fal);
- venc_write_reg(VENC_LAL__PHASE_RESET, config->lal__phase_reset);
- venc_write_reg(VENC_HS_INT_START_STOP_X, config->hs_int_start_stop_x);
- venc_write_reg(VENC_HS_EXT_START_STOP_X, config->hs_ext_start_stop_x);
- venc_write_reg(VENC_VS_INT_START_X, config->vs_int_start_x);
- venc_write_reg(VENC_VS_INT_STOP_X__VS_INT_START_Y,
+ venc_write_reg(venc, VENC_LLEN, config->llen);
+ venc_write_reg(venc, VENC_FLENS, config->flens);
+ venc_write_reg(venc, VENC_CC_CARR_WSS_CARR, config->cc_carr_wss_carr);
+ venc_write_reg(venc, VENC_C_PHASE, config->c_phase);
+ venc_write_reg(venc, VENC_GAIN_U, config->gain_u);
+ venc_write_reg(venc, VENC_GAIN_V, config->gain_v);
+ venc_write_reg(venc, VENC_GAIN_Y, config->gain_y);
+ venc_write_reg(venc, VENC_BLACK_LEVEL, config->black_level);
+ venc_write_reg(venc, VENC_BLANK_LEVEL, config->blank_level);
+ venc_write_reg(venc, VENC_M_CONTROL, config->m_control);
+ venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
+ venc->wss_data);
+ venc_write_reg(venc, VENC_S_CARR, config->s_carr);
+ venc_write_reg(venc, VENC_L21__WC_CTL, config->l21__wc_ctl);
+ venc_write_reg(venc, VENC_SAVID__EAVID, config->savid__eavid);
+ venc_write_reg(venc, VENC_FLEN__FAL, config->flen__fal);
+ venc_write_reg(venc, VENC_LAL__PHASE_RESET, config->lal__phase_reset);
+ venc_write_reg(venc, VENC_HS_INT_START_STOP_X,
+ config->hs_int_start_stop_x);
+ venc_write_reg(venc, VENC_HS_EXT_START_STOP_X,
+ config->hs_ext_start_stop_x);
+ venc_write_reg(venc, VENC_VS_INT_START_X, config->vs_int_start_x);
+ venc_write_reg(venc, VENC_VS_INT_STOP_X__VS_INT_START_Y,
config->vs_int_stop_x__vs_int_start_y);
- venc_write_reg(VENC_VS_INT_STOP_Y__VS_EXT_START_X,
+ venc_write_reg(venc, VENC_VS_INT_STOP_Y__VS_EXT_START_X,
config->vs_int_stop_y__vs_ext_start_x);
- venc_write_reg(VENC_VS_EXT_STOP_X__VS_EXT_START_Y,
+ venc_write_reg(venc, VENC_VS_EXT_STOP_X__VS_EXT_START_Y,
config->vs_ext_stop_x__vs_ext_start_y);
- venc_write_reg(VENC_VS_EXT_STOP_Y, config->vs_ext_stop_y);
- venc_write_reg(VENC_AVID_START_STOP_X, config->avid_start_stop_x);
- venc_write_reg(VENC_AVID_START_STOP_Y, config->avid_start_stop_y);
- venc_write_reg(VENC_FID_INT_START_X__FID_INT_START_Y,
+ venc_write_reg(venc, VENC_VS_EXT_STOP_Y, config->vs_ext_stop_y);
+ venc_write_reg(venc, VENC_AVID_START_STOP_X, config->avid_start_stop_x);
+ venc_write_reg(venc, VENC_AVID_START_STOP_Y, config->avid_start_stop_y);
+ venc_write_reg(venc, VENC_FID_INT_START_X__FID_INT_START_Y,
config->fid_int_start_x__fid_int_start_y);
- venc_write_reg(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X,
+ venc_write_reg(venc, VENC_FID_INT_OFFSET_Y__FID_EXT_START_X,
config->fid_int_offset_y__fid_ext_start_x);
- venc_write_reg(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y,
+ venc_write_reg(venc, VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y,
config->fid_ext_start_y__fid_ext_offset_y);
- venc_write_reg(VENC_DAC_B__DAC_C, venc_read_reg(VENC_DAC_B__DAC_C));
- venc_write_reg(VENC_VIDOUT_CTRL, config->vidout_ctrl);
- venc_write_reg(VENC_HFLTR_CTRL, config->hfltr_ctrl);
- venc_write_reg(VENC_X_COLOR, config->x_color);
- venc_write_reg(VENC_LINE21, config->line21);
- venc_write_reg(VENC_LN_SEL, config->ln_sel);
- venc_write_reg(VENC_HTRIGGER_VTRIGGER, config->htrigger_vtrigger);
- venc_write_reg(VENC_TVDETGP_INT_START_STOP_X,
+ venc_write_reg(venc, VENC_DAC_B__DAC_C,
+ venc_read_reg(venc, VENC_DAC_B__DAC_C));
+ venc_write_reg(venc, VENC_VIDOUT_CTRL, config->vidout_ctrl);
+ venc_write_reg(venc, VENC_HFLTR_CTRL, config->hfltr_ctrl);
+ venc_write_reg(venc, VENC_X_COLOR, config->x_color);
+ venc_write_reg(venc, VENC_LINE21, config->line21);
+ venc_write_reg(venc, VENC_LN_SEL, config->ln_sel);
+ venc_write_reg(venc, VENC_HTRIGGER_VTRIGGER, config->htrigger_vtrigger);
+ venc_write_reg(venc, VENC_TVDETGP_INT_START_STOP_X,
config->tvdetgp_int_start_stop_x);
- venc_write_reg(VENC_TVDETGP_INT_START_STOP_Y,
+ venc_write_reg(venc, VENC_TVDETGP_INT_START_STOP_Y,
config->tvdetgp_int_start_stop_y);
- venc_write_reg(VENC_GEN_CTRL, config->gen_ctrl);
- venc_write_reg(VENC_F_CONTROL, config->f_control);
- venc_write_reg(VENC_SYNC_CTRL, config->sync_ctrl);
+ venc_write_reg(venc, VENC_GEN_CTRL, config->gen_ctrl);
+ venc_write_reg(venc, VENC_F_CONTROL, config->f_control);
+ venc_write_reg(venc, VENC_SYNC_CTRL, config->sync_ctrl);
}
-static void venc_reset(void)
+static void venc_reset(struct venc_device *venc)
{
int t = 1000;
- venc_write_reg(VENC_F_CONTROL, 1<<8);
- while (venc_read_reg(VENC_F_CONTROL) & (1<<8)) {
+ venc_write_reg(venc, VENC_F_CONTROL, 1<<8);
+ while (venc_read_reg(venc, VENC_F_CONTROL) & (1<<8)) {
if (--t == 0) {
DSSERR("Failed to reset venc\n");
return;
@@ -422,24 +431,24 @@ static void venc_reset(void)
#endif
}
-static int venc_runtime_get(void)
+static int venc_runtime_get(struct venc_device *venc)
{
int r;
DSSDBG("venc_runtime_get\n");
- r = pm_runtime_get_sync(&venc.pdev->dev);
+ r = pm_runtime_get_sync(&venc->pdev->dev);
WARN_ON(r < 0);
return r < 0 ? r : 0;
}
-static void venc_runtime_put(void)
+static void venc_runtime_put(struct venc_device *venc)
{
int r;
DSSDBG("venc_runtime_put\n");
- r = pm_runtime_put_sync(&venc.pdev->dev);
+ r = pm_runtime_put_sync(&venc->pdev->dev);
WARN_ON(r < 0 && r != -ENOSYS);
}
@@ -455,119 +464,119 @@ static const struct venc_config *venc_timings_to_config(struct videomode *vm)
}
}
-static int venc_power_on(struct omap_dss_device *dssdev)
+static int venc_power_on(struct venc_device *venc)
{
- enum omap_channel channel = dssdev->dispc_channel;
u32 l;
int r;
- r = venc_runtime_get();
+ r = venc_runtime_get(venc);
if (r)
goto err0;
- venc_reset();
- venc_write_config(venc_timings_to_config(&venc.vm));
+ venc_reset(venc);
+ venc_write_config(venc, venc_timings_to_config(&venc->vm));
- dss_set_venc_output(venc.type);
- dss_set_dac_pwrdn_bgz(1);
+ dss_set_venc_output(venc->dss, venc->type);
+ dss_set_dac_pwrdn_bgz(venc->dss, 1);
l = 0;
- if (venc.type == OMAP_DSS_VENC_TYPE_COMPOSITE)
+ if (venc->type == OMAP_DSS_VENC_TYPE_COMPOSITE)
l |= 1 << 1;
else /* S-Video */
l |= (1 << 0) | (1 << 2);
- if (venc.invert_polarity == false)
+ if (venc->invert_polarity == false)
l |= 1 << 3;
- venc_write_reg(VENC_OUTPUT_CONTROL, l);
+ venc_write_reg(venc, VENC_OUTPUT_CONTROL, l);
- dss_mgr_set_timings(channel, &venc.vm);
+ dss_mgr_set_timings(&venc->output, &venc->vm);
- r = regulator_enable(venc.vdda_dac_reg);
+ r = regulator_enable(venc->vdda_dac_reg);
if (r)
goto err1;
- r = dss_mgr_enable(channel);
+ r = dss_mgr_enable(&venc->output);
if (r)
goto err2;
return 0;
err2:
- regulator_disable(venc.vdda_dac_reg);
+ regulator_disable(venc->vdda_dac_reg);
err1:
- venc_write_reg(VENC_OUTPUT_CONTROL, 0);
- dss_set_dac_pwrdn_bgz(0);
+ venc_write_reg(venc, VENC_OUTPUT_CONTROL, 0);
+ dss_set_dac_pwrdn_bgz(venc->dss, 0);
- venc_runtime_put();
+ venc_runtime_put(venc);
err0:
return r;
}
-static void venc_power_off(struct omap_dss_device *dssdev)
+static void venc_power_off(struct venc_device *venc)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ venc_write_reg(venc, VENC_OUTPUT_CONTROL, 0);
+ dss_set_dac_pwrdn_bgz(venc->dss, 0);
- venc_write_reg(VENC_OUTPUT_CONTROL, 0);
- dss_set_dac_pwrdn_bgz(0);
+ dss_mgr_disable(&venc->output);
- dss_mgr_disable(channel);
+ regulator_disable(venc->vdda_dac_reg);
- regulator_disable(venc.vdda_dac_reg);
-
- venc_runtime_put();
+ venc_runtime_put(venc);
}
static int venc_display_enable(struct omap_dss_device *dssdev)
{
- struct omap_dss_device *out = &venc.output;
+ struct venc_device *venc = dssdev_to_venc(dssdev);
int r;
DSSDBG("venc_display_enable\n");
- mutex_lock(&venc.venc_lock);
+ mutex_lock(&venc->venc_lock);
- if (!out->dispc_channel_connected) {
+ if (!dssdev->dispc_channel_connected) {
DSSERR("Failed to enable display: no output/manager\n");
r = -ENODEV;
goto err0;
}
- r = venc_power_on(dssdev);
+ r = venc_power_on(venc);
if (r)
goto err0;
- venc.wss_data = 0;
+ venc->wss_data = 0;
- mutex_unlock(&venc.venc_lock);
+ mutex_unlock(&venc->venc_lock);
return 0;
err0:
- mutex_unlock(&venc.venc_lock);
+ mutex_unlock(&venc->venc_lock);
return r;
}
static void venc_display_disable(struct omap_dss_device *dssdev)
{
+ struct venc_device *venc = dssdev_to_venc(dssdev);
+
DSSDBG("venc_display_disable\n");
- mutex_lock(&venc.venc_lock);
+ mutex_lock(&venc->venc_lock);
- venc_power_off(dssdev);
+ venc_power_off(venc);
- mutex_unlock(&venc.venc_lock);
+ mutex_unlock(&venc->venc_lock);
}
static void venc_set_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
+ struct venc_device *venc = dssdev_to_venc(dssdev);
struct videomode actual_vm;
DSSDBG("venc_set_timings\n");
- mutex_lock(&venc.venc_lock);
+ mutex_lock(&venc->venc_lock);
switch (venc_get_videomode(vm)) {
default:
@@ -581,14 +590,14 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
}
/* Reset WSS data when the TV standard changes. */
- if (memcmp(&venc.vm, &actual_vm, sizeof(actual_vm)))
- venc.wss_data = 0;
+ if (memcmp(&venc->vm, &actual_vm, sizeof(actual_vm)))
+ venc->wss_data = 0;
- venc.vm = actual_vm;
+ venc->vm = actual_vm;
- dispc_set_tv_pclk(13500000);
+ dispc_set_tv_pclk(venc->dss->dispc, 13500000);
- mutex_unlock(&venc.venc_lock);
+ mutex_unlock(&venc->venc_lock);
}
static int venc_check_timings(struct omap_dss_device *dssdev,
@@ -608,127 +617,136 @@ static int venc_check_timings(struct omap_dss_device *dssdev,
static void venc_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
- mutex_lock(&venc.venc_lock);
+ struct venc_device *venc = dssdev_to_venc(dssdev);
- *vm = venc.vm;
+ mutex_lock(&venc->venc_lock);
- mutex_unlock(&venc.venc_lock);
+ *vm = venc->vm;
+
+ mutex_unlock(&venc->venc_lock);
}
static u32 venc_get_wss(struct omap_dss_device *dssdev)
{
+ struct venc_device *venc = dssdev_to_venc(dssdev);
+
/* Invert due to VENC_L21_WC_CTL:INV=1 */
- return (venc.wss_data >> 8) ^ 0xfffff;
+ return (venc->wss_data >> 8) ^ 0xfffff;
}
static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
{
+ struct venc_device *venc = dssdev_to_venc(dssdev);
const struct venc_config *config;
int r;
DSSDBG("venc_set_wss\n");
- mutex_lock(&venc.venc_lock);
+ mutex_lock(&venc->venc_lock);
- config = venc_timings_to_config(&venc.vm);
+ config = venc_timings_to_config(&venc->vm);
/* Invert due to VENC_L21_WC_CTL:INV=1 */
- venc.wss_data = (wss ^ 0xfffff) << 8;
+ venc->wss_data = (wss ^ 0xfffff) << 8;
- r = venc_runtime_get();
+ r = venc_runtime_get(venc);
if (r)
goto err;
- venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
- venc.wss_data);
+ venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
+ venc->wss_data);
- venc_runtime_put();
+ venc_runtime_put(venc);
err:
- mutex_unlock(&venc.venc_lock);
+ mutex_unlock(&venc->venc_lock);
return r;
}
-static int venc_init_regulator(void)
+static int venc_init_regulator(struct venc_device *venc)
{
struct regulator *vdda_dac;
- if (venc.vdda_dac_reg != NULL)
+ if (venc->vdda_dac_reg != NULL)
return 0;
- vdda_dac = devm_regulator_get(&venc.pdev->dev, "vdda");
+ vdda_dac = devm_regulator_get(&venc->pdev->dev, "vdda");
if (IS_ERR(vdda_dac)) {
if (PTR_ERR(vdda_dac) != -EPROBE_DEFER)
DSSERR("can't get VDDA_DAC regulator\n");
return PTR_ERR(vdda_dac);
}
- venc.vdda_dac_reg = vdda_dac;
+ venc->vdda_dac_reg = vdda_dac;
return 0;
}
-static void venc_dump_regs(struct seq_file *s)
+static int venc_dump_regs(struct seq_file *s, void *p)
{
-#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
+ struct venc_device *venc = s->private;
- if (venc_runtime_get())
- return;
+#define DUMPREG(venc, r) \
+ seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(venc, r))
+
+ if (venc_runtime_get(venc))
+ return 0;
- DUMPREG(VENC_F_CONTROL);
- DUMPREG(VENC_VIDOUT_CTRL);
- DUMPREG(VENC_SYNC_CTRL);
- DUMPREG(VENC_LLEN);
- DUMPREG(VENC_FLENS);
- DUMPREG(VENC_HFLTR_CTRL);
- DUMPREG(VENC_CC_CARR_WSS_CARR);
- DUMPREG(VENC_C_PHASE);
- DUMPREG(VENC_GAIN_U);
- DUMPREG(VENC_GAIN_V);
- DUMPREG(VENC_GAIN_Y);
- DUMPREG(VENC_BLACK_LEVEL);
- DUMPREG(VENC_BLANK_LEVEL);
- DUMPREG(VENC_X_COLOR);
- DUMPREG(VENC_M_CONTROL);
- DUMPREG(VENC_BSTAMP_WSS_DATA);
- DUMPREG(VENC_S_CARR);
- DUMPREG(VENC_LINE21);
- DUMPREG(VENC_LN_SEL);
- DUMPREG(VENC_L21__WC_CTL);
- DUMPREG(VENC_HTRIGGER_VTRIGGER);
- DUMPREG(VENC_SAVID__EAVID);
- DUMPREG(VENC_FLEN__FAL);
- DUMPREG(VENC_LAL__PHASE_RESET);
- DUMPREG(VENC_HS_INT_START_STOP_X);
- DUMPREG(VENC_HS_EXT_START_STOP_X);
- DUMPREG(VENC_VS_INT_START_X);
- DUMPREG(VENC_VS_INT_STOP_X__VS_INT_START_Y);
- DUMPREG(VENC_VS_INT_STOP_Y__VS_EXT_START_X);
- DUMPREG(VENC_VS_EXT_STOP_X__VS_EXT_START_Y);
- DUMPREG(VENC_VS_EXT_STOP_Y);
- DUMPREG(VENC_AVID_START_STOP_X);
- DUMPREG(VENC_AVID_START_STOP_Y);
- DUMPREG(VENC_FID_INT_START_X__FID_INT_START_Y);
- DUMPREG(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X);
- DUMPREG(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y);
- DUMPREG(VENC_TVDETGP_INT_START_STOP_X);
- DUMPREG(VENC_TVDETGP_INT_START_STOP_Y);
- DUMPREG(VENC_GEN_CTRL);
- DUMPREG(VENC_OUTPUT_CONTROL);
- DUMPREG(VENC_OUTPUT_TEST);
-
- venc_runtime_put();
+ DUMPREG(venc, VENC_F_CONTROL);
+ DUMPREG(venc, VENC_VIDOUT_CTRL);
+ DUMPREG(venc, VENC_SYNC_CTRL);
+ DUMPREG(venc, VENC_LLEN);
+ DUMPREG(venc, VENC_FLENS);
+ DUMPREG(venc, VENC_HFLTR_CTRL);
+ DUMPREG(venc, VENC_CC_CARR_WSS_CARR);
+ DUMPREG(venc, VENC_C_PHASE);
+ DUMPREG(venc, VENC_GAIN_U);
+ DUMPREG(venc, VENC_GAIN_V);
+ DUMPREG(venc, VENC_GAIN_Y);
+ DUMPREG(venc, VENC_BLACK_LEVEL);
+ DUMPREG(venc, VENC_BLANK_LEVEL);
+ DUMPREG(venc, VENC_X_COLOR);
+ DUMPREG(venc, VENC_M_CONTROL);
+ DUMPREG(venc, VENC_BSTAMP_WSS_DATA);
+ DUMPREG(venc, VENC_S_CARR);
+ DUMPREG(venc, VENC_LINE21);
+ DUMPREG(venc, VENC_LN_SEL);
+ DUMPREG(venc, VENC_L21__WC_CTL);
+ DUMPREG(venc, VENC_HTRIGGER_VTRIGGER);
+ DUMPREG(venc, VENC_SAVID__EAVID);
+ DUMPREG(venc, VENC_FLEN__FAL);
+ DUMPREG(venc, VENC_LAL__PHASE_RESET);
+ DUMPREG(venc, VENC_HS_INT_START_STOP_X);
+ DUMPREG(venc, VENC_HS_EXT_START_STOP_X);
+ DUMPREG(venc, VENC_VS_INT_START_X);
+ DUMPREG(venc, VENC_VS_INT_STOP_X__VS_INT_START_Y);
+ DUMPREG(venc, VENC_VS_INT_STOP_Y__VS_EXT_START_X);
+ DUMPREG(venc, VENC_VS_EXT_STOP_X__VS_EXT_START_Y);
+ DUMPREG(venc, VENC_VS_EXT_STOP_Y);
+ DUMPREG(venc, VENC_AVID_START_STOP_X);
+ DUMPREG(venc, VENC_AVID_START_STOP_Y);
+ DUMPREG(venc, VENC_FID_INT_START_X__FID_INT_START_Y);
+ DUMPREG(venc, VENC_FID_INT_OFFSET_Y__FID_EXT_START_X);
+ DUMPREG(venc, VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y);
+ DUMPREG(venc, VENC_TVDETGP_INT_START_STOP_X);
+ DUMPREG(venc, VENC_TVDETGP_INT_START_STOP_Y);
+ DUMPREG(venc, VENC_GEN_CTRL);
+ DUMPREG(venc, VENC_OUTPUT_CONTROL);
+ DUMPREG(venc, VENC_OUTPUT_TEST);
+
+ venc_runtime_put(venc);
#undef DUMPREG
+ return 0;
}
-static int venc_get_clocks(struct platform_device *pdev)
+static int venc_get_clocks(struct venc_device *venc)
{
struct clk *clk;
- if (venc.requires_tv_dac_clk) {
- clk = devm_clk_get(&pdev->dev, "tv_dac_clk");
+ if (venc->requires_tv_dac_clk) {
+ clk = devm_clk_get(&venc->pdev->dev, "tv_dac_clk");
if (IS_ERR(clk)) {
DSSERR("can't get tv_dac_clk\n");
return PTR_ERR(clk);
@@ -737,7 +755,7 @@ static int venc_get_clocks(struct platform_device *pdev)
clk = NULL;
}
- venc.tv_dac_clk = clk;
+ venc->tv_dac_clk = clk;
return 0;
}
@@ -745,14 +763,14 @@ static int venc_get_clocks(struct platform_device *pdev)
static int venc_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct venc_device *venc = dssdev_to_venc(dssdev);
int r;
- r = venc_init_regulator();
+ r = venc_init_regulator(venc);
if (r)
return r;
- r = dss_mgr_connect(channel, dssdev);
+ r = dss_mgr_connect(&venc->output, dssdev);
if (r)
return r;
@@ -760,7 +778,7 @@ static int venc_connect(struct omap_dss_device *dssdev,
if (r) {
DSSERR("failed to connect output to new device: %s\n",
dst->name);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&venc->output, dssdev);
return r;
}
@@ -770,7 +788,7 @@ static int venc_connect(struct omap_dss_device *dssdev,
static void venc_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- enum omap_channel channel = dssdev->dispc_channel;
+ struct venc_device *venc = dssdev_to_venc(dssdev);
WARN_ON(dst != dssdev->dst);
@@ -779,7 +797,7 @@ static void venc_disconnect(struct omap_dss_device *dssdev,
omapdss_output_unset_device(dssdev);
- dss_mgr_disconnect(channel, dssdev);
+ dss_mgr_disconnect(&venc->output, dssdev);
}
static const struct omapdss_atv_ops venc_ops = {
@@ -797,11 +815,11 @@ static const struct omapdss_atv_ops venc_ops = {
.get_wss = venc_get_wss,
};
-static void venc_init_output(struct platform_device *pdev)
+static void venc_init_output(struct venc_device *venc)
{
- struct omap_dss_device *out = &venc.output;
+ struct omap_dss_device *out = &venc->output;
- out->dev = &pdev->dev;
+ out->dev = &venc->pdev->dev;
out->id = OMAP_DSS_OUTPUT_VENC;
out->output_type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
@@ -812,16 +830,14 @@ static void venc_init_output(struct platform_device *pdev)
omapdss_register_output(out);
}
-static void venc_uninit_output(struct platform_device *pdev)
+static void venc_uninit_output(struct venc_device *venc)
{
- struct omap_dss_device *out = &venc.output;
-
- omapdss_unregister_output(out);
+ omapdss_unregister_output(&venc->output);
}
-static int venc_probe_of(struct platform_device *pdev)
+static int venc_probe_of(struct venc_device *venc)
{
- struct device_node *node = pdev->dev.of_node;
+ struct device_node *node = venc->pdev->dev.of_node;
struct device_node *ep;
u32 channels;
int r;
@@ -830,24 +846,25 @@ static int venc_probe_of(struct platform_device *pdev)
if (!ep)
return 0;
- venc.invert_polarity = of_property_read_bool(ep, "ti,invert-polarity");
+ venc->invert_polarity = of_property_read_bool(ep, "ti,invert-polarity");
r = of_property_read_u32(ep, "ti,channels", &channels);
if (r) {
- dev_err(&pdev->dev,
+ dev_err(&venc->pdev->dev,
"failed to read property 'ti,channels': %d\n", r);
goto err;
}
switch (channels) {
case 1:
- venc.type = OMAP_DSS_VENC_TYPE_COMPOSITE;
+ venc->type = OMAP_DSS_VENC_TYPE_COMPOSITE;
break;
case 2:
- venc.type = OMAP_DSS_VENC_TYPE_SVIDEO;
+ venc->type = OMAP_DSS_VENC_TYPE_SVIDEO;
break;
default:
- dev_err(&pdev->dev, "bad channel propert '%d'\n", channels);
+ dev_err(&venc->pdev->dev, "bad channel property '%d'\n",
+ channels);
r = -EINVAL;
goto err;
}
@@ -871,65 +888,82 @@ static const struct soc_device_attribute venc_soc_devices[] = {
static int venc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
+ struct dss_device *dss = dss_get_device(master);
+ struct venc_device *venc;
u8 rev_id;
struct resource *venc_mem;
int r;
- venc.pdev = pdev;
+ venc = kzalloc(sizeof(*venc), GFP_KERNEL);
+ if (!venc)
+ return -ENOMEM;
+
+ venc->pdev = pdev;
+ venc->dss = dss;
+ dev_set_drvdata(dev, venc);
/* The OMAP34xx, OMAP35xx and AM35xx VENC require the TV DAC clock. */
if (soc_device_match(venc_soc_devices))
- venc.requires_tv_dac_clk = true;
+ venc->requires_tv_dac_clk = true;
- mutex_init(&venc.venc_lock);
+ mutex_init(&venc->venc_lock);
- venc.wss_data = 0;
+ venc->wss_data = 0;
- venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0);
- venc.base = devm_ioremap_resource(&pdev->dev, venc_mem);
- if (IS_ERR(venc.base))
- return PTR_ERR(venc.base);
+ venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0);
+ venc->base = devm_ioremap_resource(&pdev->dev, venc_mem);
+ if (IS_ERR(venc->base)) {
+ r = PTR_ERR(venc->base);
+ goto err_free;
+ }
- r = venc_get_clocks(pdev);
+ r = venc_get_clocks(venc);
if (r)
- return r;
+ goto err_free;
pm_runtime_enable(&pdev->dev);
- r = venc_runtime_get();
+ r = venc_runtime_get(venc);
if (r)
goto err_runtime_get;
- rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
+ rev_id = (u8)(venc_read_reg(venc, VENC_REV_ID) & 0xff);
dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id);
- venc_runtime_put();
+ venc_runtime_put(venc);
- r = venc_probe_of(pdev);
+ r = venc_probe_of(venc);
if (r) {
DSSERR("Invalid DT data\n");
goto err_probe_of;
}
- dss_debugfs_create_file("venc", venc_dump_regs);
+ venc->debugfs = dss_debugfs_create_file(dss, "venc", venc_dump_regs,
+ venc);
- venc_init_output(pdev);
+ venc_init_output(venc);
return 0;
err_probe_of:
err_runtime_get:
pm_runtime_disable(&pdev->dev);
+err_free:
+ kfree(venc);
return r;
}
static void venc_unbind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
+ struct venc_device *venc = dev_get_drvdata(dev);
- venc_uninit_output(pdev);
+ dss_debugfs_remove_file(venc->debugfs);
- pm_runtime_disable(&pdev->dev);
+ venc_uninit_output(venc);
+
+ pm_runtime_disable(dev);
+
+ kfree(venc);
}
static const struct component_ops venc_component_ops = {
@@ -950,24 +984,27 @@ static int venc_remove(struct platform_device *pdev)
static int venc_runtime_suspend(struct device *dev)
{
- if (venc.tv_dac_clk)
- clk_disable_unprepare(venc.tv_dac_clk);
+ struct venc_device *venc = dev_get_drvdata(dev);
+
+ if (venc->tv_dac_clk)
+ clk_disable_unprepare(venc->tv_dac_clk);
- dispc_runtime_put();
+ dispc_runtime_put(venc->dss->dispc);
return 0;
}
static int venc_runtime_resume(struct device *dev)
{
+ struct venc_device *venc = dev_get_drvdata(dev);
int r;
- r = dispc_runtime_get();
+ r = dispc_runtime_get(venc->dss->dispc);
if (r < 0)
return r;
- if (venc.tv_dac_clk)
- clk_prepare_enable(venc.tv_dac_clk);
+ if (venc->tv_dac_clk)
+ clk_prepare_enable(venc->tv_dac_clk);
return 0;
}
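venc.c follows the same conversion but ties the instance to the device through drvdata: allocated in bind, fetched at the top of each callback, freed in unbind. A plain C approximation of that lifecycle, with hypothetical demo_ names standing in for the driver-model helpers:

#include <stdio.h>
#include <stdlib.h>

struct demo_dev { void *drvdata; };

struct demo_venc { unsigned int wss_data; };

static int demo_bind(struct demo_dev *dev)
{
	struct demo_venc *venc = calloc(1, sizeof(*venc));

	if (!venc)
		return -1;	/* mirrors -ENOMEM */
	dev->drvdata = venc;	/* stands in for dev_set_drvdata() */
	return 0;
}

static void demo_set_wss(struct demo_dev *dev, unsigned int wss)
{
	struct demo_venc *venc = dev->drvdata;	/* dev_get_drvdata() */

	venc->wss_data = wss;
}

static void demo_unbind(struct demo_dev *dev)
{
	free(dev->drvdata);
	dev->drvdata = NULL;
}

int main(void)
{
	struct demo_dev dev = { 0 };

	if (demo_bind(&dev))
		return 1;
	demo_set_wss(&dev, 0x1234);
	printf("wss=0x%x\n", ((struct demo_venc *)dev.drvdata)->wss_data);
	demo_unbind(&dev);
	return 0;
}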
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index bbedac797927..585ed94ccf17 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -64,11 +64,11 @@ static int dss_video_pll_enable(struct dss_pll *pll)
struct dss_video_pll *vpll = container_of(pll, struct dss_video_pll, pll);
int r;
- r = dss_runtime_get();
+ r = dss_runtime_get(pll->dss);
if (r)
return r;
- dss_ctrl_pll_enable(pll->id, true);
+ dss_ctrl_pll_enable(pll, true);
dss_dpll_enable_scp_clk(vpll);
@@ -82,8 +82,8 @@ static int dss_video_pll_enable(struct dss_pll *pll)
err_reset:
dss_dpll_disable_scp_clk(vpll);
- dss_ctrl_pll_enable(pll->id, false);
- dss_runtime_put();
+ dss_ctrl_pll_enable(pll, false);
+ dss_runtime_put(pll->dss);
return r;
}
@@ -96,9 +96,9 @@ static void dss_video_pll_disable(struct dss_pll *pll)
dss_dpll_disable_scp_clk(vpll);
- dss_ctrl_pll_enable(pll->id, false);
+ dss_ctrl_pll_enable(pll, false);
- dss_runtime_put();
+ dss_runtime_put(pll->dss);
}
static const struct dss_pll_ops dss_pll_ops = {
@@ -136,8 +136,9 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = {
.errata_i886 = true,
};
-struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
- struct regulator *regulator)
+struct dss_pll *dss_video_pll_init(struct dss_device *dss,
+ struct platform_device *pdev, int id,
+ struct regulator *regulator)
{
const char * const reg_name[] = { "pll1", "pll2" };
const char * const clkctrl_name[] = { "pll1_clkctrl", "pll2_clkctrl" };
@@ -190,7 +191,7 @@ struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
pll->hw = &dss_dra7_video_pll_hw;
pll->ops = &dss_pll_ops;
- r = dss_pll_register(pll);
+ r = dss_pll_register(dss, pll);
if (r)
return ERR_PTR(r);
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 1b8154e58d18..6c4d40b824e4 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -113,15 +113,17 @@ static struct omap_crtc *omap_crtcs[8];
static struct omap_dss_device *omap_crtc_output[8];
/* we can probably ignore these until we support command-mode panels: */
-static int omap_crtc_dss_connect(enum omap_channel channel,
+static int omap_crtc_dss_connect(struct omap_drm_private *priv,
+ enum omap_channel channel,
struct omap_dss_device *dst)
{
- const struct dispc_ops *dispc_ops = dispc_get_ops();
+ const struct dispc_ops *dispc_ops = priv->dispc_ops;
+ struct dispc_device *dispc = priv->dispc;
if (omap_crtc_output[channel])
return -EINVAL;
- if ((dispc_ops->mgr_get_supported_outputs(channel) & dst->id) == 0)
+ if (!(dispc_ops->mgr_get_supported_outputs(dispc, channel) & dst->id))
return -EINVAL;
omap_crtc_output[channel] = dst;
@@ -130,14 +132,16 @@ static int omap_crtc_dss_connect(enum omap_channel channel,
return 0;
}
-static void omap_crtc_dss_disconnect(enum omap_channel channel,
+static void omap_crtc_dss_disconnect(struct omap_drm_private *priv,
+ enum omap_channel channel,
struct omap_dss_device *dst)
{
omap_crtc_output[channel] = NULL;
dst->dispc_channel_connected = false;
}
-static void omap_crtc_dss_start_update(enum omap_channel channel)
+static void omap_crtc_dss_start_update(struct omap_drm_private *priv,
+ enum omap_channel channel)
{
}
@@ -156,7 +160,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
return;
if (omap_crtc_output[channel]->output_type == OMAP_DISPLAY_TYPE_HDMI) {
- priv->dispc_ops->mgr_enable(channel, enable);
+ priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
return;
}
@@ -169,8 +173,9 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
omap_crtc->ignore_digit_sync_lost = true;
}
- framedone_irq = priv->dispc_ops->mgr_get_framedone_irq(channel);
- vsync_irq = priv->dispc_ops->mgr_get_vsync_irq(channel);
+ framedone_irq = priv->dispc_ops->mgr_get_framedone_irq(priv->dispc,
+ channel);
+ vsync_irq = priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel);
if (enable) {
wait = omap_irq_wait_init(dev, vsync_irq, 1);
@@ -190,7 +195,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
wait = omap_irq_wait_init(dev, vsync_irq, 2);
}
- priv->dispc_ops->mgr_enable(channel, enable);
+ priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100));
@@ -207,25 +212,28 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
}
-static int omap_crtc_dss_enable(enum omap_channel channel)
+static int omap_crtc_dss_enable(struct omap_drm_private *priv,
+ enum omap_channel channel)
{
struct omap_crtc *omap_crtc = omap_crtcs[channel];
- struct omap_drm_private *priv = omap_crtc->base.dev->dev_private;
- priv->dispc_ops->mgr_set_timings(omap_crtc->channel, &omap_crtc->vm);
+ priv->dispc_ops->mgr_set_timings(priv->dispc, omap_crtc->channel,
+ &omap_crtc->vm);
omap_crtc_set_enabled(&omap_crtc->base, true);
return 0;
}
-static void omap_crtc_dss_disable(enum omap_channel channel)
+static void omap_crtc_dss_disable(struct omap_drm_private *priv,
+ enum omap_channel channel)
{
struct omap_crtc *omap_crtc = omap_crtcs[channel];
omap_crtc_set_enabled(&omap_crtc->base, false);
}
-static void omap_crtc_dss_set_timings(enum omap_channel channel,
+static void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
+ enum omap_channel channel,
const struct videomode *vm)
{
struct omap_crtc *omap_crtc = omap_crtcs[channel];
@@ -233,25 +241,26 @@ static void omap_crtc_dss_set_timings(enum omap_channel channel,
omap_crtc->vm = *vm;
}
-static void omap_crtc_dss_set_lcd_config(enum omap_channel channel,
+static void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
+ enum omap_channel channel,
const struct dss_lcd_mgr_config *config)
{
struct omap_crtc *omap_crtc = omap_crtcs[channel];
- struct omap_drm_private *priv = omap_crtc->base.dev->dev_private;
DBG("%s", omap_crtc->name);
- priv->dispc_ops->mgr_set_lcd_config(omap_crtc->channel, config);
+ priv->dispc_ops->mgr_set_lcd_config(priv->dispc, omap_crtc->channel,
+ config);
}
static int omap_crtc_dss_register_framedone(
- enum omap_channel channel,
+ struct omap_drm_private *priv, enum omap_channel channel,
void (*handler)(void *), void *data)
{
return 0;
}
static void omap_crtc_dss_unregister_framedone(
- enum omap_channel channel,
+ struct omap_drm_private *priv, enum omap_channel channel,
void (*handler)(void *), void *data)
{
}
@@ -272,7 +281,7 @@ static const struct dss_mgr_ops mgr_ops = {
* Setup, Flush and Page Flip
*/
-void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus)
+void omap_crtc_error_irq(struct drm_crtc *crtc, u32 irqstatus)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -297,7 +306,7 @@ void omap_crtc_vblank_irq(struct drm_crtc *crtc)
* If the dispc is busy we're racing the flush operation. Try again on
* the next vblank interrupt.
*/
- if (priv->dispc_ops->mgr_go_busy(omap_crtc->channel)) {
+ if (priv->dispc_ops->mgr_go_busy(priv->dispc, omap_crtc->channel)) {
spin_unlock(&crtc->dev->event_lock);
return;
}
@@ -334,7 +343,7 @@ static void omap_crtc_write_crtc_properties(struct drm_crtc *crtc)
info.partial_alpha_enabled = false;
info.cpr_enable = false;
- priv->dispc_ops->mgr_setup(omap_crtc->channel, &info);
+ priv->dispc_ops->mgr_setup(priv->dispc, omap_crtc->channel, &info);
}
/* -----------------------------------------------------------------------------
@@ -492,7 +501,7 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_plane_state *pri_state;
if (state->color_mgmt_changed && state->gamma_lut) {
- uint length = state->gamma_lut->length /
+ unsigned int length = state->gamma_lut->length /
sizeof(struct drm_color_lut);
if (length < 2)
@@ -526,7 +535,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
if (crtc->state->color_mgmt_changed) {
struct drm_color_lut *lut = NULL;
- uint length = 0;
+ unsigned int length = 0;
if (crtc->state->gamma_lut) {
lut = (struct drm_color_lut *)
@@ -534,7 +543,8 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
length = crtc->state->gamma_lut->length /
sizeof(*lut);
}
- priv->dispc_ops->mgr_set_gamma(omap_crtc->channel, lut, length);
+ priv->dispc_ops->mgr_set_gamma(priv->dispc, omap_crtc->channel,
+ lut, length);
}
omap_crtc_write_crtc_properties(crtc);
@@ -549,7 +559,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON(ret != 0);
spin_lock_irq(&crtc->dev->event_lock);
- priv->dispc_ops->mgr_go(omap_crtc->channel);
+ priv->dispc_ops->mgr_go(priv->dispc, omap_crtc->channel);
omap_crtc_arm_event(crtc);
spin_unlock_irq(&crtc->dev->event_lock);
}
@@ -557,7 +567,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *state,
struct drm_property *property,
- uint64_t val)
+ u64 val)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct drm_plane_state *plane_state;
@@ -585,7 +595,7 @@ static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property,
- uint64_t *val)
+ u64 *val)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc_state *omap_state = to_omap_crtc_state(state);
@@ -669,11 +679,11 @@ static const char *channel_names[] = {
[OMAP_DSS_CHANNEL_LCD3] = "lcd3",
};
-void omap_crtc_pre_init(void)
+void omap_crtc_pre_init(struct omap_drm_private *priv)
{
memset(omap_crtcs, 0, sizeof(omap_crtcs));
- dss_install_mgr_ops(&mgr_ops);
+ dss_install_mgr_ops(&mgr_ops, priv);
}
void omap_crtc_pre_uninit(void)
@@ -731,8 +741,8 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
* extracted with dispc_mgr_gamma_size(). If it returns 0
* gamma table is not supprted.
*/
- if (priv->dispc_ops->mgr_gamma_size(channel)) {
- uint gamma_lut_size = 256;
+ if (priv->dispc_ops->mgr_gamma_size(priv->dispc, channel)) {
+ unsigned int gamma_lut_size = 256;
drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size);
drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.h b/drivers/gpu/drm/omapdrm/omap_crtc.h
index ad7b007c6174..eaab2d7f0324 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.h
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.h
@@ -32,12 +32,12 @@ struct videomode;
struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
-void omap_crtc_pre_init(void);
+void omap_crtc_pre_init(struct omap_drm_private *priv);
void omap_crtc_pre_uninit(void);
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
struct drm_plane *plane, struct omap_dss_device *dssdev);
int omap_crtc_wait_pending(struct drm_crtc *crtc);
-void omap_crtc_error_irq(struct drm_crtc *crtc, uint32_t irqstatus);
+void omap_crtc_error_irq(struct drm_crtc *crtc, u32 irqstatus);
void omap_crtc_vblank_irq(struct drm_crtc *crtc);
#endif /* __OMAPDRM_CRTC_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
index 600064d5c25b..c2785cc98dc9 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_priv.h
@@ -102,10 +102,10 @@ struct pat_ctrl {
};
struct pat {
- uint32_t next_pa;
+ u32 next_pa;
struct pat_area area;
struct pat_ctrl ctrl;
- uint32_t data_pa;
+ u32 data_pa;
};
#define DMM_FIXED_RETRY_COUNT 1000
@@ -129,7 +129,7 @@ struct dmm_txn {
void *engine_handle;
struct tcm *tcm;
- uint8_t *current_va;
+ u8 *current_va;
dma_addr_t current_pa;
struct pat *last_pat;
@@ -140,7 +140,7 @@ struct refill_engine {
struct dmm *dmm;
struct tcm *tcm;
- uint8_t *refill_va;
+ u8 *refill_va;
dma_addr_t refill_pa;
/* only one trans per engine for now */
@@ -154,7 +154,7 @@ struct refill_engine {
};
struct dmm_platform_data {
- uint32_t cpu_cache_flags;
+ u32 cpu_cache_flags;
};
struct dmm {
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 4be0c94673f5..f9fa1c90b35c 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -58,11 +58,11 @@ static DEFINE_SPINLOCK(list_lock);
}
static const struct {
- uint32_t x_shft; /* unused X-bits (as part of bpp) */
- uint32_t y_shft; /* unused Y-bits (as part of bpp) */
- uint32_t cpp; /* bytes/chars per pixel */
- uint32_t slot_w; /* width of each slot (in pixels) */
- uint32_t slot_h; /* height of each slot (in pixels) */
+ u32 x_shft; /* unused X-bits (as part of bpp) */
+ u32 y_shft; /* unused Y-bits (as part of bpp) */
+ u32 cpp; /* bytes/chars per pixel */
+ u32 slot_w; /* width of each slot (in pixels) */
+ u32 slot_h; /* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
[TILFMT_8BIT] = GEOM(0, 0, 1),
[TILFMT_16BIT] = GEOM(0, 1, 2),
@@ -72,7 +72,7 @@ static const struct {
/* lookup table for registers w/ per-engine instances */
-static const uint32_t reg[][4] = {
+static const u32 reg[][4] = {
[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
[PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
@@ -111,10 +111,10 @@ static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
}
/* check status and spin until wait_mask comes true */
-static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
+static int wait_status(struct refill_engine *engine, u32 wait_mask)
{
struct dmm *dmm = engine->dmm;
- uint32_t r = 0, err, i;
+ u32 r = 0, err, i;
i = DMM_FIXED_RETRY_COUNT;
while (true) {
@@ -158,7 +158,7 @@ static void release_engine(struct refill_engine *engine)
static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
struct dmm *dmm = arg;
- uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
+ u32 status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
int i;
/* ack IRQ */
@@ -226,10 +226,10 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
* corresponding slot is cleared (ie. dummy_pa is programmed)
*/
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
- struct page **pages, uint32_t npages, uint32_t roll)
+ struct page **pages, u32 npages, u32 roll)
{
dma_addr_t pat_pa = 0, data_pa = 0;
- uint32_t *data;
+ u32 *data;
struct pat *pat;
struct refill_engine *engine = txn->engine_handle;
int columns = (1 + area->x1 - area->x0);
@@ -239,7 +239,7 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
pat = alloc_dma(txn, sizeof(*pat), &pat_pa);
if (txn->last_pat)
- txn->last_pat->next_pa = (uint32_t)pat_pa;
+ txn->last_pat->next_pa = (u32)pat_pa;
pat->area = *area;
@@ -330,7 +330,7 @@ cleanup:
* DMM programming
*/
static int fill(struct tcm_area *area, struct page **pages,
- uint32_t npages, uint32_t roll, bool wait)
+ u32 npages, u32 roll, bool wait)
{
int ret = 0;
struct tcm_area slice, area_s;
@@ -378,7 +378,7 @@ static int fill(struct tcm_area *area, struct page **pages,
/* note: slots for which pages[i] == NULL are filled w/ dummy page
*/
int tiler_pin(struct tiler_block *block, struct page **pages,
- uint32_t npages, uint32_t roll, bool wait)
+ u32 npages, u32 roll, bool wait)
{
int ret;
@@ -398,8 +398,8 @@ int tiler_unpin(struct tiler_block *block)
/*
* Reserve/release
*/
-struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
- uint16_t h, uint16_t align)
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w,
+ u16 h, u16 align)
{
struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
u32 min_align = 128;
@@ -542,8 +542,8 @@ dma_addr_t tiler_ssptr(struct tiler_block *block)
block->area.p0.y * geom[block->fmt].slot_h);
}
-dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
- uint32_t x, uint32_t y)
+dma_addr_t tiler_tsptr(struct tiler_block *block, u32 orient,
+ u32 x, u32 y)
{
struct tcm_pt *p = &block->area.p0;
BUG_ON(!validfmt(block->fmt));
@@ -553,14 +553,14 @@ dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
(p->y * geom[block->fmt].slot_h) + y);
}
-void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
+void tiler_align(enum tiler_fmt fmt, u16 *w, u16 *h)
{
BUG_ON(!validfmt(fmt));
*w = round_up(*w, geom[fmt].slot_w);
*h = round_up(*h, geom[fmt].slot_h);
}
-uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
+u32 tiler_stride(enum tiler_fmt fmt, u32 orient)
{
BUG_ON(!validfmt(fmt));
@@ -570,19 +570,19 @@ uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
}
-size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+size_t tiler_size(enum tiler_fmt fmt, u16 w, u16 h)
{
tiler_align(fmt, &w, &h);
return geom[fmt].cpp * w * h;
}
-size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+size_t tiler_vsize(enum tiler_fmt fmt, u16 w, u16 h)
{
BUG_ON(!validfmt(fmt));
return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}
-uint32_t tiler_get_cpu_cache_flags(void)
+u32 tiler_get_cpu_cache_flags(void)
{
return omap_dmm->plat_data->cpu_cache_flags;
}
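For reference, a minimal flow through the tiler API converted above — reserve a 2D block, pin the pages, use the tiled DMA address, then tear down — could look like the sketch below. This is illustrative only and not taken from this patch; "pages"/"npages" are assumed to come from the GEM layer.

/* Sketch with assumed caller-provided pages; error handling trimmed. */
static int sketch_map_tiled(struct page **pages, u32 npages, u16 w, u16 h)
{
	struct tiler_block *block;
	dma_addr_t addr;
	int ret;

	tiler_align(TILFMT_16BIT, &w, &h);		/* round up to slot size */

	block = tiler_reserve_2d(TILFMT_16BIT, w, h, 0);
	if (IS_ERR(block))
		return PTR_ERR(block);

	ret = tiler_pin(block, pages, npages, 0, true);	/* roll = 0, wait */
	if (ret) {
		tiler_release(block);
		return ret;
	}

	addr = tiler_ssptr(block);	/* DMA address of the tiled view */
	/* ... program the scanout address with addr ... */
	(void)addr;

	tiler_unpin(block);
	tiler_release(block);
	return 0;
}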
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
index cc78ba4fe6ab..835e6654fa82 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h
@@ -88,30 +88,30 @@ int tiler_map_show(struct seq_file *s, void *arg);
/* pin/unpin */
int tiler_pin(struct tiler_block *block, struct page **pages,
- uint32_t npages, uint32_t roll, bool wait);
+ u32 npages, u32 roll, bool wait);
int tiler_unpin(struct tiler_block *block);
/* reserve/release */
-struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, uint16_t h,
- uint16_t align);
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, u16 w, u16 h,
+ u16 align);
struct tiler_block *tiler_reserve_1d(size_t size);
int tiler_release(struct tiler_block *block);
/* utilities */
dma_addr_t tiler_ssptr(struct tiler_block *block);
-dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
- uint32_t x, uint32_t y);
-uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient);
-size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h);
-size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h);
-void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h);
-uint32_t tiler_get_cpu_cache_flags(void);
+dma_addr_t tiler_tsptr(struct tiler_block *block, u32 orient,
+ u32 x, u32 y);
+u32 tiler_stride(enum tiler_fmt fmt, u32 orient);
+size_t tiler_size(enum tiler_fmt fmt, u16 w, u16 h);
+size_t tiler_vsize(enum tiler_fmt fmt, u16 w, u16 h);
+void tiler_align(enum tiler_fmt fmt, u16 *w, u16 *h);
+u32 tiler_get_cpu_cache_flags(void);
bool dmm_is_available(void);
extern struct platform_driver omap_dmm_driver;
/* GEM bo flags -> tiler fmt */
-static inline enum tiler_fmt gem2fmt(uint32_t flags)
+static inline enum tiler_fmt gem2fmt(u32 flags)
{
switch (flags & OMAP_BO_TILED) {
case OMAP_BO_TILED_8:
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index dd68b2556f5b..3632854c2b91 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -69,7 +69,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
struct drm_device *dev = old_state->dev;
struct omap_drm_private *priv = dev->dev_private;
- priv->dispc_ops->runtime_get();
+ priv->dispc_ops->runtime_get(priv->dispc);
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
@@ -113,7 +113,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_cleanup_planes(dev, old_state);
- priv->dispc_ops->runtime_put();
+ priv->dispc_ops->runtime_put(priv->dispc);
}
static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = {
@@ -191,7 +191,7 @@ cleanup:
static int omap_modeset_init_properties(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- unsigned int num_planes = priv->dispc_ops->get_num_ovls();
+ unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc);
priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0,
num_planes - 1);
@@ -205,8 +205,8 @@ static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_dss_device *dssdev = NULL;
- int num_ovls = priv->dispc_ops->get_num_ovls();
- int num_mgrs = priv->dispc_ops->get_num_mgrs();
+ int num_ovls = priv->dispc_ops->get_num_ovls(priv->dispc);
+ int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
int num_crtcs, crtc_idx, plane_idx;
int ret;
u32 plane_crtc_mask;
@@ -310,11 +310,14 @@ static int omap_modeset_init(struct drm_device *dev)
dev->mode_config.min_width = 8;
dev->mode_config.min_height = 2;
- /* note: eventually will need some cpu_is_omapXYZ() type stuff here
- * to fill in these limits properly on different OMAP generations..
+ /*
+ * Note: these values are used for multiple independent things:
+ * connector mode filtering, buffer sizes, crtc sizes...
+ * Use big enough values here to cover all use cases, and do more
+ * specific checking in the respective code paths.
*/
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
dev->mode_config.funcs = &omap_mode_config_funcs;
dev->mode_config.helper_private = &omap_mode_config_helper_funcs;
@@ -510,40 +513,26 @@ static const struct soc_device_attribute omapdrm_soc_devices[] = {
{ /* sentinel */ }
};
-static int pdev_probe(struct platform_device *pdev)
+static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
{
const struct soc_device_attribute *soc;
- struct omap_drm_private *priv;
struct drm_device *ddev;
unsigned int i;
int ret;
- DBG("%s", pdev->name);
+ DBG("%s", dev_name(dev));
- if (omapdss_is_initialized() == false)
- return -EPROBE_DEFER;
+ priv->dev = dev;
+ priv->dss = omapdss_get_dss();
+ priv->dispc = dispc_get_dispc(priv->dss);
+ priv->dispc_ops = dispc_get_ops(priv->dss);
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (ret) {
- dev_err(&pdev->dev, "Failed to set the DMA mask\n");
- return ret;
- }
-
- omap_crtc_pre_init();
+ omap_crtc_pre_init(priv);
ret = omap_connect_dssdevs();
if (ret)
goto err_crtc_uninit;
- /* Allocate and initialize the driver private structure. */
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- ret = -ENOMEM;
- goto err_disconnect_dssdevs;
- }
-
- priv->dispc_ops = dispc_get_ops();
-
soc = soc_device_match(omapdrm_soc_devices);
priv->omaprev = soc ? (unsigned int)soc->data : 0;
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
@@ -552,39 +541,39 @@ static int pdev_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&priv->obj_list);
/* Allocate and initialize the DRM device. */
- ddev = drm_dev_alloc(&omap_drm_driver, &pdev->dev);
+ ddev = drm_dev_alloc(&omap_drm_driver, priv->dev);
if (IS_ERR(ddev)) {
ret = PTR_ERR(ddev);
- goto err_free_priv;
+ goto err_destroy_wq;
}
+ priv->ddev = ddev;
ddev->dev_private = priv;
- platform_set_drvdata(pdev, ddev);
/* Get memory bandwidth limits */
if (priv->dispc_ops->get_memory_bandwidth_limit)
priv->max_bandwidth =
- priv->dispc_ops->get_memory_bandwidth_limit();
+ priv->dispc_ops->get_memory_bandwidth_limit(priv->dispc);
omap_gem_init(ddev);
ret = omap_modeset_init(ddev);
if (ret) {
- dev_err(&pdev->dev, "omap_modeset_init failed: ret=%d\n", ret);
+ dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret);
goto err_free_drm_dev;
}
/* Initialize vblank handling, start with all CRTCs disabled. */
ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret) {
- dev_err(&pdev->dev, "could not init vblank\n");
+ dev_err(priv->dev, "could not init vblank\n");
goto err_cleanup_modeset;
}
for (i = 0; i < priv->num_crtcs; i++)
drm_crtc_vblank_off(priv->crtcs[i]);
- priv->fbdev = omap_fbdev_init(ddev);
+ omap_fbdev_init(ddev);
drm_kms_helper_poll_init(ddev);
omap_modeset_enable_external_hpd();
@@ -602,28 +591,25 @@ static int pdev_probe(struct platform_device *pdev)
err_cleanup_helpers:
omap_modeset_disable_external_hpd();
drm_kms_helper_poll_fini(ddev);
- if (priv->fbdev)
- omap_fbdev_free(ddev);
+
+ omap_fbdev_fini(ddev);
err_cleanup_modeset:
drm_mode_config_cleanup(ddev);
omap_drm_irq_uninstall(ddev);
err_free_drm_dev:
omap_gem_deinit(ddev);
drm_dev_unref(ddev);
-err_free_priv:
+err_destroy_wq:
destroy_workqueue(priv->wq);
- kfree(priv);
-err_disconnect_dssdevs:
omap_disconnect_dssdevs();
err_crtc_uninit:
omap_crtc_pre_uninit();
return ret;
}
-static int pdev_remove(struct platform_device *pdev)
+static void omapdrm_cleanup(struct omap_drm_private *priv)
{
- struct drm_device *ddev = platform_get_drvdata(pdev);
- struct omap_drm_private *priv = ddev->dev_private;
+ struct drm_device *ddev = priv->ddev;
DBG("");
@@ -632,8 +618,7 @@ static int pdev_remove(struct platform_device *pdev)
omap_modeset_disable_external_hpd();
drm_kms_helper_poll_fini(ddev);
- if (priv->fbdev)
- omap_fbdev_free(ddev);
+ omap_fbdev_fini(ddev);
drm_atomic_helper_shutdown(ddev);
@@ -645,10 +630,45 @@ static int pdev_remove(struct platform_device *pdev)
drm_dev_unref(ddev);
destroy_workqueue(priv->wq);
- kfree(priv);
omap_disconnect_dssdevs();
omap_crtc_pre_uninit();
+}
+
+static int pdev_probe(struct platform_device *pdev)
+{
+ struct omap_drm_private *priv;
+ int ret;
+
+ if (omapdss_is_initialized() == false)
+ return -EPROBE_DEFER;
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to set the DMA mask\n");
+ return ret;
+ }
+
+ /* Allocate and initialize the driver private structure. */
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ ret = omapdrm_init(priv, &pdev->dev);
+ if (ret < 0)
+ kfree(priv);
+
+ return ret;
+}
+
+static int pdev_remove(struct platform_device *pdev)
+{
+ struct omap_drm_private *priv = platform_get_drvdata(pdev);
+
+ omapdrm_cleanup(priv);
+ kfree(priv);
return 0;
}
@@ -692,7 +712,8 @@ static int omap_drm_resume_all_displays(void)
static int omap_drm_suspend(struct device *dev)
{
- struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct omap_drm_private *priv = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = priv->ddev;
drm_kms_helper_poll_disable(drm_dev);
@@ -705,7 +726,8 @@ static int omap_drm_suspend(struct device *dev)
static int omap_drm_resume(struct device *dev)
{
- struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct omap_drm_private *priv = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = priv->ddev;
drm_modeset_lock_all(drm_dev);
omap_drm_resume_all_displays();
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 0ac97fe09f9b..6eaee4df4559 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -46,8 +46,12 @@
struct omap_drm_usergart;
struct omap_drm_private {
- uint32_t omaprev;
+ struct drm_device *ddev;
+ struct device *dev;
+ u32 omaprev;
+ struct dss_device *dss;
+ struct dispc_device *dispc;
const struct dispc_ops *dispc_ops;
unsigned int num_crtcs;
@@ -81,7 +85,7 @@ struct omap_drm_private {
/* irq handling: */
spinlock_t wait_lock; /* protects the wait_list */
struct list_head wait_list; /* list of omap_irq_wait */
- uint32_t irq_mask; /* enabled irqs in addition to wait_list */
+ u32 irq_mask; /* enabled irqs in addition to wait_list */
/* memory bandwidth limit if it is needed on the platform */
unsigned int max_bandwidth;
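struct omap_drm_private now carries the dss/dispc instance pointers alongside the ops table, so every dispc_ops call passes the device explicitly instead of relying on a global dispc context. A one-liner sketch (assumed wrapper name) of the resulting calling convention:

/* Sketch only: the dispc instance travels with every ops call. */
static void sketch_enable_overlay(struct omap_drm_private *priv,
				  enum omap_plane_id id, bool enable)
{
	priv->dispc_ops->ovl_enable(priv->dispc, id, enable);
}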
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index b2539a90e1a4..5fd22ca73913 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -52,8 +52,8 @@ static const u32 formats[] = {
/* per-plane info for the fb: */
struct plane {
struct drm_gem_object *bo;
- uint32_t pitch;
- uint32_t offset;
+ u32 pitch;
+ u32 offset;
dma_addr_t dma_addr;
};
@@ -100,10 +100,10 @@ static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
.destroy = omap_framebuffer_destroy,
};
-static uint32_t get_linear_addr(struct plane *plane,
+static u32 get_linear_addr(struct plane *plane,
const struct drm_format_info *format, int n, int x, int y)
{
- uint32_t offset;
+ u32 offset;
offset = plane->offset
+ (x * format->cpp[n] / (n == 0 ? 1 : format->hsub))
@@ -121,9 +121,9 @@ bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb)
}
/* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */
-static uint32_t drm_rotation_to_tiler(unsigned int drm_rot)
+static u32 drm_rotation_to_tiler(unsigned int drm_rot)
{
- uint32_t orient;
+ u32 orient;
switch (drm_rot & DRM_MODE_ROTATE_MASK) {
default:
@@ -158,7 +158,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
const struct drm_format_info *format = omap_fb->format;
struct plane *plane = &omap_fb->planes[0];
- uint32_t x, y, orient = 0;
+ u32 x, y, orient = 0;
info->fourcc = fb->format->format;
@@ -177,8 +177,8 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
y = state->src_y >> 16;
if (omap_gem_flags(plane->bo) & OMAP_BO_TILED) {
- uint32_t w = state->src_w >> 16;
- uint32_t h = state->src_h >> 16;
+ u32 w = state->src_w >> 16;
+ u32 h = state->src_h >> 16;
orient = drm_rotation_to_tiler(state->rotation);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index fb309d19ca1b..0f66c74a54b0 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -80,15 +80,21 @@ fallback:
static struct fb_ops omap_fb_ops = {
.owner = THIS_MODULE,
- DRM_FB_HELPER_DEFAULT_OPS,
+
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_setcmap = drm_fb_helper_setcmap,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = omap_fbdev_pan_display,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+ .fb_ioctl = drm_fb_helper_ioctl,
.fb_read = drm_fb_helper_sys_read,
.fb_write = drm_fb_helper_sys_write,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
-
- .fb_pan_display = omap_fbdev_pan_display,
};
static int omap_fbdev_create(struct drm_fb_helper *helper,
@@ -188,7 +194,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
dev->mode_config.fb_base = dma_addr;
- fbi->screen_base = omap_gem_vaddr(fbdev->bo);
+ fbi->screen_buffer = omap_gem_vaddr(fbdev->bo);
fbi->screen_size = fbdev->bo->size;
fbi->fix.smem_start = dma_addr;
fbi->fix.smem_len = fbdev->bo->size;
@@ -236,13 +242,16 @@ static struct drm_fb_helper *get_fb(struct fb_info *fbi)
}
/* initialize fbdev helper */
-struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
+void omap_fbdev_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_fbdev *fbdev = NULL;
struct drm_fb_helper *helper;
int ret = 0;
+ if (!priv->num_crtcs || !priv->num_connectors)
+ return;
+
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
if (!fbdev)
goto fail;
@@ -254,10 +263,8 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
- if (ret) {
- dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ if (ret)
goto fail;
- }
ret = drm_fb_helper_single_add_all_connectors(helper);
if (ret)
@@ -269,7 +276,7 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
priv->fbdev = helper;
- return helper;
+ return;
fini:
drm_fb_helper_fini(helper);
@@ -277,12 +284,9 @@ fail:
kfree(fbdev);
dev_warn(dev->dev, "omap_fbdev_init failed\n");
- /* well, limp along without an fbdev.. maybe X11 will work? */
-
- return NULL;
}
-void omap_fbdev_free(struct drm_device *dev)
+void omap_fbdev_fini(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct drm_fb_helper *helper = priv->fbdev;
@@ -290,14 +294,18 @@ void omap_fbdev_free(struct drm_device *dev)
DBG();
+ if (!helper)
+ return;
+
drm_fb_helper_unregister_fbi(helper);
drm_fb_helper_fini(helper);
- fbdev = to_omap_fbdev(priv->fbdev);
+ fbdev = to_omap_fbdev(helper);
/* unpin the GEM object pinned in omap_fbdev_create() */
- omap_gem_unpin(fbdev->bo);
+ if (fbdev->bo)
+ omap_gem_unpin(fbdev->bo);
/* this will free the backing object */
if (fbdev->fb)
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.h b/drivers/gpu/drm/omapdrm/omap_fbdev.h
index 1f5ba0996a1a..7dfd843f73f1 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.h
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.h
@@ -24,14 +24,13 @@ struct drm_device;
struct drm_fb_helper;
#ifdef CONFIG_DRM_FBDEV_EMULATION
-struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
-void omap_fbdev_free(struct drm_device *dev);
+void omap_fbdev_init(struct drm_device *dev);
+void omap_fbdev_fini(struct drm_device *dev);
#else
-static inline struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
+static inline void omap_fbdev_init(struct drm_device *dev)
{
- return NULL;
}
-static inline void omap_fbdev_free(struct drm_device *dev)
+static inline void omap_fbdev_fini(struct drm_device *dev)
{
}
#endif
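The fbdev changes expand DRM_FB_HELPER_DEFAULT_OPS into an explicit initializer so that .fb_pan_display points at the driver's own handler rather than the generic helper, and turn omap_fbdev_init()/omap_fbdev_fini() into best-effort void calls. For contrast, a sketch of the pattern the driver moves away from, which relied on C's last-designated-initializer-wins rule to override one field of the macro (gcc flags this with -Woverride-init):

/* Sketch only; this is the shape the patch replaces with explicit ops. */
static struct fb_ops sketch_fb_ops = {
	.owner		= THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_pan_display	= omap_fbdev_pan_display,	/* overrides the macro's entry */
};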
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 443469d4fa46..0faf042b82e1 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -39,13 +39,13 @@ struct omap_gem_object {
struct list_head mm_list;
- uint32_t flags;
+ u32 flags;
/** width/height for tiled formats (rounded up to slot boundaries) */
- uint16_t width, height;
+ u16 width, height;
/** roll applied when mapping to DMM */
- uint32_t roll;
+ u32 roll;
/**
* dma_addr contains the buffer DMA address. It is valid for
@@ -73,7 +73,7 @@ struct omap_gem_object {
/**
* # of users of dma_addr
*/
- uint32_t dma_addr_cnt;
+ u32 dma_addr_cnt;
/**
* If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag
@@ -137,7 +137,7 @@ struct omap_drm_usergart {
*/
/** get mmap offset */
-static uint64_t mmap_offset(struct drm_gem_object *obj)
+static u64 mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
int ret;
@@ -331,14 +331,15 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
}
/* get buffer flags */
-uint32_t omap_gem_flags(struct drm_gem_object *obj)
+u32 omap_gem_flags(struct drm_gem_object *obj)
{
return to_omap_bo(obj)->flags;
}
-uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
+u64 omap_gem_mmap_offset(struct drm_gem_object *obj)
{
- uint64_t offset;
+ u64 offset;
+
mutex_lock(&obj->dev->struct_mutex);
offset = mmap_offset(obj);
mutex_unlock(&obj->dev->struct_mutex);
@@ -649,7 +650,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
* into user memory. We don't have to do much here at the moment.
*/
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
+ u32 handle, u64 *offset)
{
struct drm_gem_object *obj;
int ret = 0;
@@ -675,10 +676,10 @@ fail:
*
* Call only from non-atomic contexts.
*/
-int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
+int omap_gem_roll(struct drm_gem_object *obj, u32 roll)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- uint32_t npages = obj->size >> PAGE_SHIFT;
+ u32 npages = obj->size >> PAGE_SHIFT;
int ret = 0;
if (roll > npages) {
@@ -808,7 +809,7 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr)
if (!is_contiguous(omap_obj) && priv->has_dmm) {
if (omap_obj->dma_addr_cnt == 0) {
struct page **pages;
- uint32_t npages = obj->size >> PAGE_SHIFT;
+ u32 npages = obj->size >> PAGE_SHIFT;
enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
struct tiler_block *block;
@@ -904,7 +905,7 @@ void omap_gem_unpin(struct drm_gem_object *obj)
* specified orientation and x,y offset from top-left corner of buffer
* (only valid for tiled 2d buffers)
*/
-int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
+int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
int x, int y, dma_addr_t *dma_addr)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
@@ -921,7 +922,7 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
}
/* Get tiler stride for the buffer (only valid for 2d tiled buffers) */
-int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient)
+int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
int ret = -EINVAL;
@@ -1003,7 +1004,8 @@ int omap_gem_resume(struct drm_device *dev)
list_for_each_entry(omap_obj, &priv->obj_list, mm_list) {
if (omap_obj->block) {
struct drm_gem_object *obj = &omap_obj->base;
- uint32_t npages = obj->size >> PAGE_SHIFT;
+ u32 npages = obj->size >> PAGE_SHIFT;
+
WARN_ON(!omap_obj->pages); /* this can't happen */
ret = tiler_pin(omap_obj->block,
omap_obj->pages, npages,
@@ -1027,7 +1029,7 @@ int omap_gem_resume(struct drm_device *dev)
void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- uint64_t off;
+ u64 off;
off = drm_vma_node_start(&obj->vma_node);
@@ -1115,7 +1117,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
- union omap_gem_size gsize, uint32_t flags)
+ union omap_gem_size gsize, u32 flags)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_gem_object *omap_obj;
@@ -1280,7 +1282,7 @@ done:
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
+ union omap_gem_size gsize, u32 flags, u32 *handle)
{
struct drm_gem_object *obj;
int ret;
@@ -1327,7 +1329,8 @@ void omap_gem_init(struct drm_device *dev)
/* reserve 4k aligned/wide regions for userspace mappings: */
for (i = 0; i < ARRAY_SIZE(fmts); i++) {
- uint16_t h = 1, w = PAGE_SIZE >> i;
+ u16 h = 1, w = PAGE_SIZE >> i;
+
tiler_align(fmts[i], &w, &h);
/* note: since each region is 1 4kb page wide, and minimum
* number of rows, the height ends up being the same as the
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h
index 35fa690b3d90..a78bde05193a 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.h
+++ b/drivers/gpu/drm/omapdrm/omap_gem.h
@@ -53,17 +53,17 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m);
/* GEM Object Creation and Deletion */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
- union omap_gem_size gsize, uint32_t flags);
+ union omap_gem_size gsize, u32 flags);
struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
struct sg_table *sgt);
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
- union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
+ union omap_gem_size gsize, u32 flags, u32 *handle);
void omap_gem_free_object(struct drm_gem_object *obj);
void *omap_gem_vaddr(struct drm_gem_object *obj);
/* Dumb Buffers Interface */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
+ u32 handle, u64 *offset);
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -71,7 +71,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int omap_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
-uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
+u64 omap_gem_mmap_offset(struct drm_gem_object *obj);
size_t omap_gem_mmap_size(struct drm_gem_object *obj);
/* PRIME Interface */
@@ -81,7 +81,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
struct dma_buf *buffer);
int omap_gem_fault(struct vm_fault *vmf);
-int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
+int omap_gem_roll(struct drm_gem_object *obj, u32 roll);
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff);
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
enum dma_data_direction dir);
@@ -91,9 +91,9 @@ int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages,
bool remap);
int omap_gem_put_pages(struct drm_gem_object *obj);
-uint32_t omap_gem_flags(struct drm_gem_object *obj);
-int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, uint32_t orient,
+u32 omap_gem_flags(struct drm_gem_object *obj);
+int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient,
int x, int y, dma_addr_t *dma_addr);
-int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);
+int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient);
#endif /* __OMAPDRM_GEM_H__ */
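For context, omap_gem_pin()/omap_gem_unpin() bracket any use of a buffer's DMA address; a minimal illustrative pairing (not from this patch, names assumed) looks like:

/* Sketch only: obj is assumed to be an omapdrm GEM object. */
static int sketch_use_dma_addr(struct drm_gem_object *obj)
{
	dma_addr_t dma_addr;
	int ret;

	ret = omap_gem_pin(obj, &dma_addr);	/* maps via DMM/TILER if not contiguous */
	if (ret)
		return ret;

	/* ... hand dma_addr to the display hardware ... */

	omap_gem_unpin(obj);			/* drop the dma_addr_cnt reference */
	return 0;
}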
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index 53ba424823b2..c85115049f86 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -20,7 +20,7 @@
struct omap_irq_wait {
struct list_head node;
wait_queue_head_t wq;
- uint32_t irqmask;
+ u32 irqmask;
int count;
};
@@ -29,7 +29,7 @@ static void omap_irq_update(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_irq_wait *wait;
- uint32_t irqmask = priv->irq_mask;
+ u32 irqmask = priv->irq_mask;
assert_spin_locked(&priv->wait_lock);
@@ -38,7 +38,7 @@ static void omap_irq_update(struct drm_device *dev)
DBG("irqmask=%08x", irqmask);
- priv->dispc_ops->write_irqenable(irqmask);
+ priv->dispc_ops->write_irqenable(priv->dispc, irqmask);
}
static void omap_irq_wait_handler(struct omap_irq_wait *wait)
@@ -48,7 +48,7 @@ static void omap_irq_wait_handler(struct omap_irq_wait *wait)
}
struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev,
- uint32_t irqmask, int count)
+ u32 irqmask, int count)
{
struct omap_drm_private *priv = dev->dev_private;
struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL);
@@ -108,7 +108,8 @@ int omap_irq_enable_vblank(struct drm_crtc *crtc)
DBG("dev=%p, crtc=%u", dev, channel);
spin_lock_irqsave(&priv->wait_lock, flags);
- priv->irq_mask |= priv->dispc_ops->mgr_get_vsync_irq(channel);
+ priv->irq_mask |= priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
+ channel);
omap_irq_update(dev);
spin_unlock_irqrestore(&priv->wait_lock, flags);
@@ -134,7 +135,8 @@ void omap_irq_disable_vblank(struct drm_crtc *crtc)
DBG("dev=%p, crtc=%u", dev, channel);
spin_lock_irqsave(&priv->wait_lock, flags);
- priv->irq_mask &= ~priv->dispc_ops->mgr_get_vsync_irq(channel);
+ priv->irq_mask &= ~priv->dispc_ops->mgr_get_vsync_irq(priv->dispc,
+ channel);
omap_irq_update(dev);
spin_unlock_irqrestore(&priv->wait_lock, flags);
}
@@ -198,9 +200,9 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
unsigned int id;
u32 irqstatus;
- irqstatus = priv->dispc_ops->read_irqstatus();
- priv->dispc_ops->clear_irqstatus(irqstatus);
- priv->dispc_ops->read_irqstatus(); /* flush posted write */
+ irqstatus = priv->dispc_ops->read_irqstatus(priv->dispc);
+ priv->dispc_ops->clear_irqstatus(priv->dispc, irqstatus);
+ priv->dispc_ops->read_irqstatus(priv->dispc); /* flush posted write */
VERB("irqs: %08x", irqstatus);
@@ -208,12 +210,12 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
struct drm_crtc *crtc = priv->crtcs[id];
enum omap_channel channel = omap_crtc_channel(crtc);
- if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(channel)) {
+ if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel)) {
drm_handle_vblank(dev, id);
omap_crtc_vblank_irq(crtc);
}
- if (irqstatus & priv->dispc_ops->mgr_get_sync_lost_irq(channel))
+ if (irqstatus & priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, channel))
omap_crtc_error_irq(crtc, irqstatus);
}
@@ -247,7 +249,7 @@ static const u32 omap_underflow_irqs[] = {
int omap_drm_irq_install(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- unsigned int num_mgrs = priv->dispc_ops->get_num_mgrs();
+ unsigned int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
unsigned int max_planes;
unsigned int i;
int ret;
@@ -265,13 +267,13 @@ int omap_drm_irq_install(struct drm_device *dev)
}
for (i = 0; i < num_mgrs; ++i)
- priv->irq_mask |= priv->dispc_ops->mgr_get_sync_lost_irq(i);
+ priv->irq_mask |= priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, i);
- priv->dispc_ops->runtime_get();
- priv->dispc_ops->clear_irqstatus(0xffffffff);
- priv->dispc_ops->runtime_put();
+ priv->dispc_ops->runtime_get(priv->dispc);
+ priv->dispc_ops->clear_irqstatus(priv->dispc, 0xffffffff);
+ priv->dispc_ops->runtime_put(priv->dispc);
- ret = priv->dispc_ops->request_irq(omap_irq_handler, dev);
+ ret = priv->dispc_ops->request_irq(priv->dispc, omap_irq_handler, dev);
if (ret < 0)
return ret;
@@ -289,5 +291,5 @@ void omap_drm_irq_uninstall(struct drm_device *dev)
dev->irq_enabled = false;
- priv->dispc_ops->free_irq(dev);
+ priv->dispc_ops->free_irq(priv->dispc, dev);
}
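The handler above keeps the usual read / clear / read-back pattern: the trailing read forces the posted clear to reach the hardware before the handler returns. As a small sketch (assumed helper name):

/* Sketch only: acknowledge handled IRQ bits and flush the posted write. */
static void sketch_ack_irqs(struct omap_drm_private *priv, u32 status)
{
	priv->dispc_ops->clear_irqstatus(priv->dispc, status);
	priv->dispc_ops->read_irqstatus(priv->dispc);	/* flush posted write */
}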
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.h b/drivers/gpu/drm/omapdrm/omap_irq.h
index 606c09932bc0..9d5441468eca 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.h
+++ b/drivers/gpu/drm/omapdrm/omap_irq.h
@@ -32,7 +32,7 @@ void omap_drm_irq_uninstall(struct drm_device *dev);
int omap_drm_irq_install(struct drm_device *dev);
struct omap_irq_wait *omap_irq_wait_init(struct drm_device *dev,
- uint32_t irqmask, int count);
+ u32 irqmask, int count);
int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait,
unsigned long timeout);
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 7d789d1551a1..2899435cad6e 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -77,17 +77,17 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
&info.paddr, &info.p_uv_addr);
/* and finally, update omapdss: */
- ret = priv->dispc_ops->ovl_setup(omap_plane->id, &info,
+ ret = priv->dispc_ops->ovl_setup(priv->dispc, omap_plane->id, &info,
omap_crtc_timings(state->crtc), false,
omap_crtc_channel(state->crtc));
if (ret) {
dev_err(plane->dev->dev, "Failed to setup plane %s\n",
omap_plane->name);
- priv->dispc_ops->ovl_enable(omap_plane->id, false);
+ priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false);
return;
}
- priv->dispc_ops->ovl_enable(omap_plane->id, true);
+ priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, true);
}
static void omap_plane_atomic_disable(struct drm_plane *plane,
@@ -100,7 +100,7 @@ static void omap_plane_atomic_disable(struct drm_plane *plane,
plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY
? 0 : omap_plane->id;
- priv->dispc_ops->ovl_enable(omap_plane->id, false);
+ priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false);
}
static int omap_plane_atomic_check(struct drm_plane *plane,
@@ -201,7 +201,7 @@ static void omap_plane_reset(struct drm_plane *plane)
static int omap_plane_atomic_set_property(struct drm_plane *plane,
struct drm_plane_state *state,
struct drm_property *property,
- uint64_t val)
+ u64 val)
{
struct omap_drm_private *priv = plane->dev->dev_private;
@@ -216,7 +216,7 @@ static int omap_plane_atomic_set_property(struct drm_plane *plane,
static int omap_plane_atomic_get_property(struct drm_plane *plane,
const struct drm_plane_state *state,
struct drm_property *property,
- uint64_t *val)
+ u64 *val)
{
struct omap_drm_private *priv = plane->dev->dev_private;
@@ -259,7 +259,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
u32 possible_crtcs)
{
struct omap_drm_private *priv = dev->dev_private;
- unsigned int num_planes = priv->dispc_ops->get_num_ovls();
+ unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc);
struct drm_plane *plane;
struct omap_plane *omap_plane;
enum omap_plane_id id;
@@ -278,7 +278,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
if (!omap_plane)
return ERR_PTR(-ENOMEM);
- formats = priv->dispc_ops->ovl_get_color_modes(id);
+ formats = priv->dispc_ops->ovl_get_color_modes(priv->dispc, id);
for (nformats = 0; formats[nformats]; ++nformats)
;
omap_plane->id = id;
diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c
index 661362d072f7..d7f7bc9f061a 100644
--- a/drivers/gpu/drm/omapdrm/tcm-sita.c
+++ b/drivers/gpu/drm/omapdrm/tcm-sita.c
@@ -33,8 +33,8 @@ static unsigned long mask[8];
* map ptr to bitmap
* stride slots in a row
*/
-static void free_slots(unsigned long pos, uint16_t w, uint16_t h,
- unsigned long *map, uint16_t stride)
+static void free_slots(unsigned long pos, u16 w, u16 h,
+ unsigned long *map, u16 stride)
{
int i;
@@ -48,7 +48,7 @@ static void free_slots(unsigned long pos, uint16_t w, uint16_t h,
* map ptr to bitmap
* num_bits number of bits in bitmap
*/
-static int r2l_b2t_1d(uint16_t w, unsigned long *pos, unsigned long *map,
+static int r2l_b2t_1d(u16 w, unsigned long *pos, unsigned long *map,
size_t num_bits)
{
unsigned long search_count = 0;
@@ -84,7 +84,7 @@ static int r2l_b2t_1d(uint16_t w, unsigned long *pos, unsigned long *map,
* num_bits = size of bitmap
* stride = bits in one row of container
*/
-static int l2r_t2b(uint16_t w, uint16_t h, uint16_t a, int16_t offset,
+static int l2r_t2b(u16 w, u16 h, u16 a, s16 offset,
unsigned long *pos, unsigned long slot_bytes,
unsigned long *map, size_t num_bits, size_t slot_stride)
{
@@ -179,7 +179,7 @@ static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
}
static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u16 align,
- int16_t offset, uint16_t slot_bytes,
+ s16 offset, u16 slot_bytes,
struct tcm_area *area)
{
unsigned long pos;
@@ -208,7 +208,7 @@ static void sita_deinit(struct tcm *tcm)
static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
{
unsigned long pos;
- uint16_t w, h;
+ u16 w, h;
pos = area->p0.x + area->p0.y * tcm->width;
if (area->is2d) {
diff --git a/drivers/gpu/drm/omapdrm/tcm.h b/drivers/gpu/drm/omapdrm/tcm.h
index d8a369a4f269..8efcda93c50d 100644
--- a/drivers/gpu/drm/omapdrm/tcm.h
+++ b/drivers/gpu/drm/omapdrm/tcm.h
@@ -65,7 +65,7 @@ struct tcm {
/* function table */
s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u16 align,
- int16_t offset, uint16_t slot_bytes,
+ s16 offset, u16 slot_bytes,
struct tcm_area *area);
s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
s32 (*free)(struct tcm *tcm, struct tcm_area *area);
@@ -129,7 +129,7 @@ static inline void tcm_deinit(struct tcm *tcm)
* allocation.
*/
static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
- u16 align, int16_t offset, uint16_t slot_bytes,
+ u16 align, s16 offset, u16 slot_bytes,
struct tcm_area *area)
{
/* perform rudimentary error checking */
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 988048ebcc22..25682ff3449a 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -108,6 +108,15 @@ config DRM_PANEL_RASPBERRYPI_TOUCHSCREEN
Pi 7" Touchscreen. To compile this driver as a module,
choose M here.
+config DRM_PANEL_RAYDIUM_RM68200
+ tristate "Raydium RM68200 720x1280 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Raydium RM68200
+ 720x1280 DSI video mode panel.
+
config DRM_PANEL_SAMSUNG_S6E3HA2
tristate "Samsung S6E3HA2 DSI video mode panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 3d2a88d0e965..f26efc11d746 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
+obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index b4ec0ecff807..bd38bf4f1ba6 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -179,7 +179,7 @@ enum ili9322_input {
ILI9322_INPUT_UNKNOWN = 0xc,
};
-const char *ili9322_inputs[] = {
+static const char * const ili9322_inputs[] = {
"8 bit serial RGB through",
"8 bit serial RGB aligned",
"8 bit serial RGB dummy 320x240",
@@ -340,7 +340,7 @@ static bool ili9322_writeable_reg(struct device *dev, unsigned int reg)
return true;
}
-const struct regmap_config ili9322_regmap_config = {
+static const struct regmap_config ili9322_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0x44,
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index b5e3994f0aa8..5185819c5b79 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -1,5 +1,5 @@
/*
- * rcar_du_crtc.c -- R-Car Display Unit CRTCs
+ * Generic LVDS panel driver
*
* Copyright (C) 2016 Laurent Pinchart
* Copyright (C) 2016 Renesas Electronics Corporation
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index c189cd6329c8..90f1ae4af93c 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -1,16 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics SA 2017
*
* Authors: Philippe Cornu <philippe.cornu@st.com>
* Yannick Fertre <yannick.fertre@st.com>
- *
- * License terms: GNU General Public License (GPL), version 2
*/
+
#include <drm/drmP.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <linux/backlight.h>
#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
#include <video/mipi_display.h>
#define DRV_NAME "orisetech_otm8009a"
@@ -62,6 +63,7 @@ struct otm8009a {
struct drm_panel panel;
struct backlight_device *bl_dev;
struct gpio_desc *reset_gpio;
+ struct regulator *supply;
bool prepared;
bool enabled;
};
@@ -279,6 +281,8 @@ static int otm8009a_unprepare(struct drm_panel *panel)
msleep(20);
}
+ regulator_disable(ctx->supply);
+
ctx->prepared = false;
return 0;
@@ -292,6 +296,12 @@ static int otm8009a_prepare(struct drm_panel *panel)
if (ctx->prepared)
return 0;
+ ret = regulator_enable(ctx->supply);
+ if (ret < 0) {
+ DRM_ERROR("failed to enable supply: %d\n", ret);
+ return ret;
+ }
+
if (ctx->reset_gpio) {
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
@@ -414,6 +424,13 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset_gpio);
}
+ ctx->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(ctx->supply)) {
+ ret = PTR_ERR(ctx->supply);
+ dev_err(dev, "failed to request regulator: %d\n", ret);
+ return ret;
+ }
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dev = dev;
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
new file mode 100644
index 000000000000..77593533abcd
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) STMicroelectronics SA 2017
+ *
+ * Authors: Philippe Cornu <philippe.cornu@st.com>
+ * Yannick Fertre <yannick.fertre@st.com>
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+/*** Manufacturer Command Set ***/
+#define MCS_CMD_MODE_SW 0xFE /* CMD Mode Switch */
+#define MCS_CMD1_UCS 0x00 /* User Command Set (UCS = CMD1) */
+#define MCS_CMD2_P0 0x01 /* Manufacture Command Set Page0 (CMD2 P0) */
+#define MCS_CMD2_P1 0x02 /* Manufacture Command Set Page1 (CMD2 P1) */
+#define MCS_CMD2_P2 0x03 /* Manufacture Command Set Page2 (CMD2 P2) */
+#define MCS_CMD2_P3 0x04 /* Manufacture Command Set Page3 (CMD2 P3) */
+
+/* CMD2 P0 commands (Display Options and Power) */
+#define MCS_STBCTR 0x12 /* TE1 Output Setting Zig-Zag Connection */
+#define MCS_SGOPCTR 0x16 /* Source Bias Current */
+#define MCS_SDCTR 0x1A /* Source Output Delay Time */
+#define MCS_INVCTR 0x1B /* Inversion Type */
+#define MCS_EXT_PWR_IC 0x24 /* External PWR IC Control */
+#define MCS_SETAVDD 0x27 /* PFM Control for AVDD Output */
+#define MCS_SETAVEE 0x29 /* PFM Control for AVEE Output */
+#define MCS_BT2CTR 0x2B /* DDVDL Charge Pump Control */
+#define MCS_BT3CTR 0x2F /* VGH Charge Pump Control */
+#define MCS_BT4CTR 0x34 /* VGL Charge Pump Control */
+#define MCS_VCMCTR 0x46 /* VCOM Output Level Control */
+#define MCS_SETVGN 0x52 /* VG M/S N Control */
+#define MCS_SETVGP 0x54 /* VG M/S P Control */
+#define MCS_SW_CTRL 0x5F /* Interface Control for PFM and MIPI */
+
+/* CMD2 P2 commands (GOA Timing Control) - no description in datasheet */
+#define GOA_VSTV1 0x00
+#define GOA_VSTV2 0x07
+#define GOA_VCLK1 0x0E
+#define GOA_VCLK2 0x17
+#define GOA_VCLK_OPT1 0x20
+#define GOA_BICLK1 0x2A
+#define GOA_BICLK2 0x37
+#define GOA_BICLK3 0x44
+#define GOA_BICLK4 0x4F
+#define GOA_BICLK_OPT1 0x5B
+#define GOA_BICLK_OPT2 0x60
+#define MCS_GOA_GPO1 0x6D
+#define MCS_GOA_GPO2 0x71
+#define MCS_GOA_EQ 0x74
+#define MCS_GOA_CLK_GALLON 0x7C
+#define MCS_GOA_FS_SEL0 0x7E
+#define MCS_GOA_FS_SEL1 0x87
+#define MCS_GOA_FS_SEL2 0x91
+#define MCS_GOA_FS_SEL3 0x9B
+#define MCS_GOA_BS_SEL0 0xAC
+#define MCS_GOA_BS_SEL1 0xB5
+#define MCS_GOA_BS_SEL2 0xBF
+#define MCS_GOA_BS_SEL3 0xC9
+#define MCS_GOA_BS_SEL4 0xD3
+
+/* CMD2 P3 commands (Gamma) */
+#define MCS_GAMMA_VP 0x60 /* Gamma VP1~VP16 */
+#define MCS_GAMMA_VN 0x70 /* Gamma VN1~VN16 */
+
+struct rm68200 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct regulator *supply;
+ struct backlight_device *backlight;
+ bool prepared;
+ bool enabled;
+};
+
+static const struct drm_display_mode default_mode = {
+ .clock = 52582,
+ .hdisplay = 720,
+ .hsync_start = 720 + 38,
+ .hsync_end = 720 + 38 + 8,
+ .htotal = 720 + 38 + 8 + 38,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 12,
+ .vsync_end = 1280 + 12 + 4,
+ .vtotal = 1280 + 12 + 4 + 12,
+ .vrefresh = 50,
+ .flags = 0,
+ .width_mm = 68,
+ .height_mm = 122,
+};
+
+static inline struct rm68200 *panel_to_rm68200(struct drm_panel *panel)
+{
+ return container_of(panel, struct rm68200, panel);
+}
+
+static void rm68200_dcs_write_buf(struct rm68200 *ctx, const void *data,
+ size_t len)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int err;
+
+ err = mipi_dsi_dcs_write_buffer(dsi, data, len);
+ if (err < 0)
+ DRM_ERROR_RATELIMITED("MIPI DSI DCS write buffer failed: %d\n",
+ err);
+}
+
+static void rm68200_dcs_write_cmd(struct rm68200 *ctx, u8 cmd, u8 value)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int err;
+
+ err = mipi_dsi_dcs_write(dsi, cmd, &value, 1);
+ if (err < 0)
+ DRM_ERROR_RATELIMITED("MIPI DSI DCS write failed: %d\n", err);
+}
+
+#define dcs_write_seq(ctx, seq...) \
+({ \
+ static const u8 d[] = { seq }; \
+ \
+ rm68200_dcs_write_buf(ctx, d, ARRAY_SIZE(d)); \
+})
+
+/*
+ * This panel is not able to auto-increment all cmd addresses so for some of
+ * them, we need to send them one by one...
+ */
+#define dcs_write_cmd_seq(ctx, cmd, seq...) \
+({ \
+ static const u8 d[] = { seq }; \
+ unsigned int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(d) ; i++) \
+ rm68200_dcs_write_cmd(ctx, cmd + i, d[i]); \
+})
+
+static void rm68200_init_sequence(struct rm68200 *ctx)
+{
+ /* Enter CMD2 with page 0 */
+ dcs_write_seq(ctx, MCS_CMD_MODE_SW, MCS_CMD2_P0);
+ dcs_write_cmd_seq(ctx, MCS_EXT_PWR_IC, 0xC0, 0x53, 0x00);
+ dcs_write_seq(ctx, MCS_BT2CTR, 0xE5);
+ dcs_write_seq(ctx, MCS_SETAVDD, 0x0A);
+ dcs_write_seq(ctx, MCS_SETAVEE, 0x0A);
+ dcs_write_seq(ctx, MCS_SGOPCTR, 0x52);
+ dcs_write_seq(ctx, MCS_BT3CTR, 0x53);
+ dcs_write_seq(ctx, MCS_BT4CTR, 0x5A);
+ dcs_write_seq(ctx, MCS_INVCTR, 0x00);
+ dcs_write_seq(ctx, MCS_STBCTR, 0x0A);
+ dcs_write_seq(ctx, MCS_SDCTR, 0x06);
+ dcs_write_seq(ctx, MCS_VCMCTR, 0x56);
+ dcs_write_seq(ctx, MCS_SETVGN, 0xA0, 0x00);
+ dcs_write_seq(ctx, MCS_SETVGP, 0xA0, 0x00);
+ dcs_write_seq(ctx, MCS_SW_CTRL, 0x11); /* 2 data lanes, see doc */
+
+ dcs_write_seq(ctx, MCS_CMD_MODE_SW, MCS_CMD2_P2);
+ dcs_write_seq(ctx, GOA_VSTV1, 0x05);
+ dcs_write_seq(ctx, 0x02, 0x0B);
+ dcs_write_seq(ctx, 0x03, 0x0F);
+ dcs_write_seq(ctx, 0x04, 0x7D, 0x00, 0x50);
+ dcs_write_cmd_seq(ctx, GOA_VSTV2, 0x05, 0x16, 0x0D, 0x11, 0x7D, 0x00,
+ 0x50);
+ dcs_write_cmd_seq(ctx, GOA_VCLK1, 0x07, 0x08, 0x01, 0x02, 0x00, 0x7D,
+ 0x00, 0x85, 0x08);
+ dcs_write_cmd_seq(ctx, GOA_VCLK2, 0x03, 0x04, 0x05, 0x06, 0x00, 0x7D,
+ 0x00, 0x85, 0x08);
+ dcs_write_seq(ctx, GOA_VCLK_OPT1, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00);
+ dcs_write_cmd_seq(ctx, GOA_BICLK1, 0x07, 0x08);
+ dcs_write_seq(ctx, 0x2D, 0x01);
+ dcs_write_seq(ctx, 0x2F, 0x02, 0x00, 0x40, 0x05, 0x08, 0x54, 0x7D,
+ 0x00);
+ dcs_write_cmd_seq(ctx, GOA_BICLK2, 0x03, 0x04, 0x05, 0x06, 0x00);
+ dcs_write_seq(ctx, 0x3D, 0x40);
+ dcs_write_seq(ctx, 0x3F, 0x05, 0x08, 0x54, 0x7D, 0x00);
+ dcs_write_seq(ctx, GOA_BICLK3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00);
+ dcs_write_seq(ctx, GOA_BICLK4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00);
+ dcs_write_seq(ctx, 0x58, 0x00, 0x00, 0x00);
+ dcs_write_seq(ctx, GOA_BICLK_OPT1, 0x00, 0x00, 0x00, 0x00, 0x00);
+ dcs_write_seq(ctx, GOA_BICLK_OPT2, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ dcs_write_seq(ctx, MCS_GOA_GPO1, 0x00, 0x00, 0x00, 0x00);
+ dcs_write_seq(ctx, MCS_GOA_GPO2, 0x00, 0x20, 0x00);
+ dcs_write_seq(ctx, MCS_GOA_EQ, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08,
+ 0x00, 0x00);
+ dcs_write_seq(ctx, MCS_GOA_CLK_GALLON, 0x00, 0x00);
+ dcs_write_cmd_seq(ctx, MCS_GOA_FS_SEL0, 0xBF, 0x02, 0x06, 0x14, 0x10,
+ 0x16, 0x12, 0x08, 0x3F);
+ dcs_write_cmd_seq(ctx, MCS_GOA_FS_SEL1, 0x3F, 0x3F, 0x3F, 0x3F, 0x0C,
+ 0x0A, 0x0E, 0x3F, 0x3F, 0x00);
+ dcs_write_cmd_seq(ctx, MCS_GOA_FS_SEL2, 0x04, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x05, 0x01, 0x3F, 0x3F, 0x0F);
+ dcs_write_cmd_seq(ctx, MCS_GOA_FS_SEL3, 0x0B, 0x0D, 0x3F, 0x3F, 0x3F,
+ 0x3F);
+ dcs_write_cmd_seq(ctx, 0xA2, 0x3F, 0x09, 0x13, 0x17, 0x11, 0x15);
+ dcs_write_cmd_seq(ctx, 0xA9, 0x07, 0x03, 0x3F);
+ dcs_write_cmd_seq(ctx, MCS_GOA_BS_SEL0, 0x3F, 0x05, 0x01, 0x17, 0x13,
+ 0x15, 0x11, 0x0F, 0x3F);
+ dcs_write_cmd_seq(ctx, MCS_GOA_BS_SEL1, 0x3F, 0x3F, 0x3F, 0x3F, 0x0B,
+ 0x0D, 0x09, 0x3F, 0x3F, 0x07);
+ dcs_write_cmd_seq(ctx, MCS_GOA_BS_SEL2, 0x03, 0x3F, 0x3F, 0x3F, 0x3F,
+ 0x02, 0x06, 0x3F, 0x3F, 0x08);
+ dcs_write_cmd_seq(ctx, MCS_GOA_BS_SEL3, 0x0C, 0x0A, 0x3F, 0x3F, 0x3F,
+ 0x3F, 0x3F, 0x0E, 0x10, 0x14);
+ dcs_write_cmd_seq(ctx, MCS_GOA_BS_SEL4, 0x12, 0x16, 0x00, 0x04, 0x3F);
+ dcs_write_seq(ctx, 0xDC, 0x02);
+ dcs_write_seq(ctx, 0xDE, 0x12);
+
+ dcs_write_seq(ctx, MCS_CMD_MODE_SW, 0x0E); /* No documentation */
+ dcs_write_seq(ctx, 0x01, 0x75);
+
+ dcs_write_seq(ctx, MCS_CMD_MODE_SW, MCS_CMD2_P3);
+ dcs_write_cmd_seq(ctx, MCS_GAMMA_VP, 0x00, 0x0C, 0x12, 0x0E, 0x06,
+ 0x12, 0x0E, 0x0B, 0x15, 0x0B, 0x10, 0x07, 0x0F,
+ 0x12, 0x0C, 0x00);
+ dcs_write_cmd_seq(ctx, MCS_GAMMA_VN, 0x00, 0x0C, 0x12, 0x0E, 0x06,
+ 0x12, 0x0E, 0x0B, 0x15, 0x0B, 0x10, 0x07, 0x0F,
+ 0x12, 0x0C, 0x00);
+
+ /* Exit CMD2 */
+ dcs_write_seq(ctx, MCS_CMD_MODE_SW, MCS_CMD1_UCS);
+}
+
+static int rm68200_disable(struct drm_panel *panel)
+{
+ struct rm68200 *ctx = panel_to_rm68200(panel);
+
+ if (!ctx->enabled)
+ return 0;
+
+ backlight_disable(ctx->backlight);
+
+ ctx->enabled = false;
+
+ return 0;
+}
+
+static int rm68200_unprepare(struct drm_panel *panel)
+{
+ struct rm68200 *ctx = panel_to_rm68200(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret)
+ DRM_WARN("failed to set display off: %d\n", ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret)
+ DRM_WARN("failed to enter sleep mode: %d\n", ret);
+
+ msleep(120);
+
+ if (ctx->reset_gpio) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ msleep(20);
+ }
+
+ regulator_disable(ctx->supply);
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int rm68200_prepare(struct drm_panel *panel)
+{
+ struct rm68200 *ctx = panel_to_rm68200(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_enable(ctx->supply);
+ if (ret < 0) {
+ DRM_ERROR("failed to enable supply: %d\n", ret);
+ return ret;
+ }
+
+ if (ctx->reset_gpio) {
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ msleep(20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(100);
+ }
+
+ rm68200_init_sequence(ctx);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret)
+ return ret;
+
+ msleep(125);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret)
+ return ret;
+
+ msleep(20);
+
+ ctx->prepared = true;
+
+ return 0;
+}
+
+static int rm68200_enable(struct drm_panel *panel)
+{
+ struct rm68200 *ctx = panel_to_rm68200(panel);
+
+ if (ctx->enabled)
+ return 0;
+
+ backlight_enable(ctx->backlight);
+
+ ctx->enabled = true;
+
+ return 0;
+}
+
+static int rm68200_get_modes(struct drm_panel *panel)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_ERROR("failed to add mode %ux%ux@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(panel->connector, mode);
+
+ panel->connector->display_info.width_mm = mode->width_mm;
+ panel->connector->display_info.height_mm = mode->height_mm;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs rm68200_drm_funcs = {
+ .disable = rm68200_disable,
+ .unprepare = rm68200_unprepare,
+ .prepare = rm68200_prepare,
+ .enable = rm68200_enable,
+ .get_modes = rm68200_get_modes,
+};
+
+static int rm68200_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct rm68200 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ ret = PTR_ERR(ctx->reset_gpio);
+ dev_err(dev, "cannot get reset GPIO: %d\n", ret);
+ return ret;
+ }
+
+ ctx->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(ctx->supply)) {
+ ret = PTR_ERR(ctx->supply);
+ dev_err(dev, "cannot get regulator: %d\n", ret);
+ return ret;
+ }
+
+ ctx->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(ctx->backlight))
+ return PTR_ERR(ctx->backlight);
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &rm68200_drm_funcs;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "mipi_dsi_attach() failed: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rm68200_remove(struct mipi_dsi_device *dsi)
+{
+ struct rm68200 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id raydium_rm68200_of_match[] = {
+ { .compatible = "raydium,rm68200" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, raydium_rm68200_of_match);
+
+static struct mipi_dsi_driver raydium_rm68200_driver = {
+ .probe = rm68200_probe,
+ .remove = rm68200_remove,
+ .driver = {
+ .name = "panel-raydium-rm68200",
+ .of_match_table = raydium_rm68200_of_match,
+ },
+};
+module_mipi_dsi_driver(raydium_rm68200_driver);
+
+MODULE_AUTHOR("Philippe Cornu <philippe.cornu@st.com>");
+MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
+MODULE_DESCRIPTION("DRM Driver for Raydium RM68200 MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
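/*
 * A minimal sketch, not taken from this driver, of the order in which a DSI
 * host/encoder is expected to invoke the panel hooks registered above. The
 * example_* names are placeholders; only the drm_panel_* helpers are real
 * DRM API.
 */
#include <drm/drm_panel.h>

static void example_display_on(struct drm_panel *panel)
{
	drm_panel_prepare(panel);	/* rm68200_prepare(): supply, reset, init, display on */
	drm_panel_enable(panel);	/* rm68200_enable(): backlight on */
}

static void example_display_off(struct drm_panel *panel)
{
	drm_panel_disable(panel);	/* rm68200_disable(): backlight off */
	drm_panel_unprepare(panel);	/* rm68200_unprepare(): enter sleep, reset, supply off */
}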
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 5591984a392b..cbf1ab404ee7 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -581,6 +581,29 @@ static const struct panel_desc auo_b133htn01 = {
},
};
+static const struct drm_display_mode auo_g104sn02_mode = {
+ .clock = 40000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 216,
+ .htotal = 800 + 40 + 216 + 128,
+ .vdisplay = 600,
+ .vsync_start = 600 + 10,
+ .vsync_end = 600 + 10 + 35,
+ .vtotal = 600 + 10 + 35 + 2,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_g104sn02 = {
+ .modes = &auo_g104sn02_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 211,
+ .height = 158,
+ },
+};
+
static const struct display_timing auo_g133han01_timings = {
.pixelclock = { 134000000, 141200000, 149000000 },
.hactive = { 1920, 1920, 1920 },
@@ -1217,6 +1240,30 @@ static const struct panel_desc innolux_zj070na_01p = {
},
};
+static const struct display_timing koe_tx31d200vm0baa_timing = {
+ .pixelclock = { 39600000, 43200000, 48000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 16, 36, 56 },
+ .hback_porch = { 16, 36, 56 },
+ .hsync_len = { 8, 8, 8 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 6, 21, 33 },
+ .vback_porch = { 6, 21, 33 },
+ .vsync_len = { 8, 8, 8 },
+ .flags = DISPLAY_FLAGS_DE_HIGH,
+};
+
+static const struct panel_desc koe_tx31d200vm0baa = {
+ .timings = &koe_tx31d200vm0baa_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 292,
+ .height = 109,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+};
+
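/*
 * Worked check for the typical timings above (illustrative, not from the
 * driver): the typical refresh rate comes out close to 60 Hz.
 *
 *   htotal(typ) = 1280 + 36 + 36 + 8 = 1360
 *   vtotal(typ) =  480 + 21 + 21 + 8 =  530
 *   43200000 / (1360 * 530)          ≈ 59.9 Hz
 */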
static const struct display_timing kyo_tcg121xglp_timing = {
.pixelclock = { 52000000, 65000000, 71000000 },
.hactive = { 1024, 1024, 1024 },
@@ -1597,7 +1644,7 @@ static const struct panel_desc ontat_yx700wv03 = {
.width = 154,
.height = 83,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
@@ -1741,23 +1788,22 @@ static const struct panel_desc sharp_lq101k1ly04 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
};
-static const struct drm_display_mode sharp_lq123p1jx31_mode = {
- .clock = 252750,
- .hdisplay = 2400,
- .hsync_start = 2400 + 48,
- .hsync_end = 2400 + 48 + 32,
- .htotal = 2400 + 48 + 32 + 80,
- .vdisplay = 1600,
- .vsync_start = 1600 + 3,
- .vsync_end = 1600 + 3 + 10,
- .vtotal = 1600 + 3 + 10 + 33,
- .vrefresh = 60,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+static const struct display_timing sharp_lq123p1jx31_timing = {
+ .pixelclock = { 252750000, 252750000, 266604720 },
+ .hactive = { 2400, 2400, 2400 },
+ .hfront_porch = { 48, 48, 48 },
+ .hback_porch = { 80, 80, 84 },
+ .hsync_len = { 32, 32, 32 },
+ .vactive = { 1600, 1600, 1600 },
+ .vfront_porch = { 3, 3, 3 },
+ .vback_porch = { 33, 33, 120 },
+ .vsync_len = { 10, 10, 10 },
+ .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
};
static const struct panel_desc sharp_lq123p1jx31 = {
- .modes = &sharp_lq123p1jx31_mode,
- .num_modes = 1,
+ .timings = &sharp_lq123p1jx31_timing,
+ .num_timings = 1,
.bpc = 8,
.size = {
.width = 259,
@@ -2049,6 +2095,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b133xtn01",
.data = &auo_b133xtn01,
}, {
+ .compatible = "auo,g104sn02",
+ .data = &auo_g104sn02,
+ }, {
.compatible = "auo,g133han01",
.data = &auo_g133han01,
}, {
@@ -2124,6 +2173,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
+ .compatible = "koe,tx31d200vm0baa",
+ .data = &koe_tx31d200vm0baa,
+ }, {
.compatible = "kyo,tcg121xglp",
.data = &kyo_tcg121xglp,
}, {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 9a9214ae0fb5..ecb35ed0eac8 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -309,7 +309,7 @@ void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct qxl_bo *bo = gem_to_qxl_bo(qxl_fb->obj);
WARN_ON(bo->shadow);
- drm_gem_object_unreference_unlocked(qxl_fb->obj);
+ drm_gem_object_put_unlocked(qxl_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(qxl_fb);
}
@@ -1215,7 +1215,7 @@ qxl_user_framebuffer_create(struct drm_device *dev,
ret = qxl_framebuffer_init(dev, qxl_fb, mode_cmd, obj, &qxl_fb_funcs);
if (ret) {
kfree(qxl_fb);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return NULL;
}
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 11085ab01374..c666b89eed5d 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -82,6 +82,6 @@ int qxl_mode_dumb_mmap(struct drm_file *file_priv,
return -ENOENT;
qobj = gem_to_qxl_bo(gobj);
*offset_p = qxl_bo_mmap_offset(qobj);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 23af3e352673..338891401f35 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -95,7 +95,7 @@ static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
qxl_bo_kunmap(qbo);
qxl_bo_unpin(qbo);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
}
int qxl_get_handle_for_primary_fb(struct qxl_device *qdev,
@@ -316,11 +316,11 @@ out_unref:
qxl_bo_unpin(qbo);
}
if (fb && ret) {
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
drm_framebuffer_cleanup(fb);
kfree(fb);
}
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return ret;
}
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 85f546719adb..f5c1e7872e92 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -98,7 +98,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
return r;
/* drop reference from allocate - handle holds it now */
*qobj = gem_to_qxl_bo(gobj);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index e8c0b1037230..e238a1a2eca1 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -121,7 +121,7 @@ static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
qobj = gem_to_qxl_bo(gobj);
ret = qxl_release_list_add(release, qobj);
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
if (ret)
return ret;
@@ -343,7 +343,7 @@ out2:
qxl_bo_unreserve(qobj);
out:
- drm_gem_object_unreference_unlocked(gobj);
+ drm_gem_object_put_unlocked(gobj);
return ret;
}
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index af62824ed4cc..6a30196e9d6c 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -211,13 +211,13 @@ void qxl_bo_unref(struct qxl_bo **bo)
if ((*bo) == NULL)
return;
- drm_gem_object_unreference_unlocked(&(*bo)->gem_base);
+ drm_gem_object_put_unlocked(&(*bo)->gem_base);
*bo = NULL;
}
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
- drm_gem_object_reference(&bo->gem_base);
+ drm_gem_object_get(&bo->gem_base);
return bo;
}
@@ -318,7 +318,7 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_unreference_unlocked(&bo->gem_base);
+ drm_gem_object_put_unlocked(&bo->gem_base);
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 31dd04f6baa1..b28288a781ef 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -415,7 +415,6 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
ret = radeon_suspend_kms(drm_dev, false, false, false);
pci_save_state(pdev);
@@ -452,7 +451,6 @@ static int radeon_pmops_runtime_resume(struct device *dev)
ret = radeon_resume_kms(drm_dev, false, false);
drm_kms_helper_poll_enable(drm_dev);
- vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
return 0;
}
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 7d76ff47028d..3e8bf79bea58 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -71,10 +71,6 @@ struct rockchip_dp_device {
struct regmap *grf;
struct reset_control *rst;
- struct work_struct psr_work;
- struct mutex psr_lock;
- unsigned int psr_state;
-
const struct rockchip_dp_chip_data *data;
struct analogix_dp_device *adp;
@@ -84,28 +80,13 @@ struct rockchip_dp_device {
static void analogix_dp_psr_set(struct drm_encoder *encoder, bool enabled)
{
struct rockchip_dp_device *dp = to_dp(encoder);
+ int ret;
- if (!analogix_dp_psr_supported(dp->adp))
+ if (!analogix_dp_psr_enabled(dp->adp))
return;
DRM_DEV_DEBUG(dp->dev, "%s PSR...\n", enabled ? "Entry" : "Exit");
- mutex_lock(&dp->psr_lock);
- if (enabled)
- dp->psr_state = EDP_VSC_PSR_STATE_ACTIVE;
- else
- dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
-
- schedule_work(&dp->psr_work);
- mutex_unlock(&dp->psr_lock);
-}
-
-static void analogix_dp_psr_work(struct work_struct *work)
-{
- struct rockchip_dp_device *dp =
- container_of(work, typeof(*dp), psr_work);
- int ret;
-
ret = rockchip_drm_wait_vact_end(dp->encoder.crtc,
PSR_WAIT_LINE_FLAG_TIMEOUT_MS);
if (ret) {
@@ -113,12 +94,10 @@ static void analogix_dp_psr_work(struct work_struct *work)
return;
}
- mutex_lock(&dp->psr_lock);
- if (dp->psr_state == EDP_VSC_PSR_STATE_ACTIVE)
+ if (enabled)
analogix_dp_enable_psr(dp->adp);
else
analogix_dp_disable_psr(dp->adp);
- mutex_unlock(&dp->psr_lock);
}
static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
@@ -135,8 +114,6 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
struct rockchip_dp_device *dp = to_dp(plat_data);
int ret;
- cancel_work_sync(&dp->psr_work);
-
ret = clk_prepare_enable(dp->pclk);
if (ret < 0) {
DRM_DEV_ERROR(dp->dev, "failed to enable pclk %d\n", ret);
@@ -355,10 +332,6 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
dp->plat_data.power_off = rockchip_dp_powerdown;
dp->plat_data.get_modes = rockchip_dp_get_modes;
- mutex_init(&dp->psr_lock);
- dp->psr_state = ~EDP_VSC_PSR_STATE_ACTIVE;
- INIT_WORK(&dp->psr_work, analogix_dp_psr_work);
-
ret = rockchip_drm_psr_register(&dp->encoder, analogix_dp_psr_set);
if (ret < 0)
goto err_cleanup_encoder;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index ec999d9f15f6..c6fbdcd87c16 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -43,8 +43,6 @@
#define GRF_SOC_CON9 0x6224
#define DP_SEL_VOP_LIT BIT(12)
#define GRF_SOC_CON26 0x6268
-#define UPHY_SEL_BIT 3
-#define UPHY_SEL_MASK BIT(19)
#define DPTX_HPD_SEL (3 << 12)
#define DPTX_HPD_DEL (2 << 12)
#define DPTX_HPD_SEL_MASK (3 << 28)
@@ -394,11 +392,6 @@ static int cdn_dp_enable_phy(struct cdn_dp_device *dp, struct cdn_dp_port *port)
union extcon_property_value property;
int ret;
- ret = cdn_dp_grf_write(dp, GRF_SOC_CON26,
- (port->id << UPHY_SEL_BIT) | UPHY_SEL_MASK);
- if (ret)
- return ret;
-
if (!port->phy_enabled) {
ret = phy_power_on(port->phy);
if (ret) {
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 158e79e5062e..53d4afe15278 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -117,6 +117,8 @@ struct vop {
spinlock_t reg_lock;
/* lock vop irq reg */
spinlock_t irq_lock;
+ /* protects crtc enable/disable */
+ struct mutex vop_lock;
unsigned int irq;
@@ -517,7 +519,10 @@ static int vop_enable(struct drm_crtc *crtc)
goto err_disable_aclk;
}
- memcpy(vop->regs, vop->regsbak, vop->len);
+ spin_lock(&vop->reg_lock);
+ for (i = 0; i < vop->len; i += 4)
+ writel_relaxed(vop->regsbak[i / 4], vop->regs + i);
+
/*
* We need to make sure that all windows are disabled before we
* enable the crtc. Otherwise we might try to scan from a destroyed
@@ -527,10 +532,9 @@ static int vop_enable(struct drm_crtc *crtc)
struct vop_win *vop_win = &vop->win[i];
const struct vop_win_data *win = vop_win->data;
- spin_lock(&vop->reg_lock);
VOP_WIN_SET(vop, win, enable, 0);
- spin_unlock(&vop->reg_lock);
}
+ spin_unlock(&vop->reg_lock);
vop_cfg_done(vop);
@@ -569,6 +573,7 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
WARN_ON(vop->event);
+ mutex_lock(&vop->vop_lock);
drm_crtc_vblank_off(crtc);
/*
@@ -604,6 +609,7 @@ static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
clk_disable(vop->aclk);
clk_disable(vop->hclk);
pm_runtime_put(vop->dev);
+ mutex_unlock(&vop->vop_lock);
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
@@ -868,10 +874,13 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
uint32_t pin_pol, val;
int ret;
+ mutex_lock(&vop->vop_lock);
+
WARN_ON(vop->event);
ret = vop_enable(crtc);
if (ret) {
+ mutex_unlock(&vop->vop_lock);
DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret);
return;
}
@@ -935,6 +944,7 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
VOP_REG_SET(vop, common, standby, 0);
+ mutex_unlock(&vop->vop_lock);
}
static bool vop_fs_irq_is_pending(struct vop *vop)
@@ -1137,15 +1147,14 @@ static void vop_handle_vblank(struct vop *vop)
{
struct drm_device *drm = vop->drm_dev;
struct drm_crtc *crtc = &vop->crtc;
- unsigned long flags;
- spin_lock_irqsave(&drm->event_lock, flags);
+ spin_lock(&drm->event_lock);
if (vop->event) {
drm_crtc_send_vblank_event(crtc, vop->event);
drm_crtc_vblank_put(crtc);
vop->event = NULL;
}
- spin_unlock_irqrestore(&drm->event_lock, flags);
+ spin_unlock(&drm->event_lock);
if (test_and_clear_bit(VOP_PENDING_FB_UNREF, &vop->pending))
drm_flip_work_commit(&vop->fb_unref_work, system_unbound_wq);
@@ -1156,21 +1165,20 @@ static irqreturn_t vop_isr(int irq, void *data)
struct vop *vop = data;
struct drm_crtc *crtc = &vop->crtc;
uint32_t active_irqs;
- unsigned long flags;
int ret = IRQ_NONE;
/*
* interrupt register has interrupt status, enable and clear bits, we
* must hold irq_lock to avoid a race with enable/disable_vblank().
*/
- spin_lock_irqsave(&vop->irq_lock, flags);
+ spin_lock(&vop->irq_lock);
active_irqs = VOP_INTR_GET_TYPE(vop, status, INTR_MASK);
/* Clear all active interrupt sources */
if (active_irqs)
VOP_INTR_SET_TYPE(vop, clear, active_irqs, 1);
- spin_unlock_irqrestore(&vop->irq_lock, flags);
+ spin_unlock(&vop->irq_lock);
/* This is expected for vop iommu irqs, since the irq is shared */
if (!active_irqs)
@@ -1393,7 +1401,11 @@ static int vop_initial(struct vop *vop)
usleep_range(10, 20);
reset_control_deassert(ahb_rst);
- memcpy(vop->regsbak, vop->regs, vop->len);
+ VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1);
+ VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0);
+
+ for (i = 0; i < vop->len; i += sizeof(u32))
+ vop->regsbak[i / 4] = readl_relaxed(vop->regs + i);
VOP_REG_SET(vop, misc, global_regdone_en, 1);
VOP_REG_SET(vop, common, dsp_blank, 0);
@@ -1473,15 +1485,21 @@ int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout)
{
struct vop *vop = to_vop(crtc);
unsigned long jiffies_left;
+ int ret = 0;
if (!crtc || !vop->is_enabled)
return -ENODEV;
- if (mstimeout <= 0)
- return -EINVAL;
+ mutex_lock(&vop->vop_lock);
+ if (mstimeout <= 0) {
+ ret = -EINVAL;
+ goto out;
+ }
- if (vop_line_flag_irq_is_enabled(vop))
- return -EBUSY;
+ if (vop_line_flag_irq_is_enabled(vop)) {
+ ret = -EBUSY;
+ goto out;
+ }
reinit_completion(&vop->line_flag_completion);
vop_line_flag_irq_enable(vop);
@@ -1492,10 +1510,13 @@ int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout)
if (jiffies_left == 0) {
DRM_DEV_ERROR(vop->dev, "Timeout waiting for IRQ\n");
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto out;
}
- return 0;
+out:
+ mutex_unlock(&vop->vop_lock);
+ return ret;
}
EXPORT_SYMBOL(rockchip_drm_wait_vact_end);
@@ -1545,18 +1566,11 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
spin_lock_init(&vop->reg_lock);
spin_lock_init(&vop->irq_lock);
-
- ret = devm_request_irq(dev, vop->irq, vop_isr,
- IRQF_SHARED, dev_name(dev), vop);
- if (ret)
- return ret;
-
- /* IRQ is initially disabled; it gets enabled in power_on */
- disable_irq(vop->irq);
+ mutex_init(&vop->vop_lock);
ret = vop_create_crtc(vop);
if (ret)
- goto err_enable_irq;
+ return ret;
pm_runtime_enable(&pdev->dev);
@@ -1567,13 +1581,19 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
goto err_disable_pm_runtime;
}
+ ret = devm_request_irq(dev, vop->irq, vop_isr,
+ IRQF_SHARED, dev_name(dev), vop);
+ if (ret)
+ goto err_disable_pm_runtime;
+
+ /* IRQ is initially disabled; it gets enabled in power_on */
+ disable_irq(vop->irq);
+
return 0;
err_disable_pm_runtime:
pm_runtime_disable(&pdev->dev);
vop_destroy_crtc(vop);
-err_enable_irq:
- enable_irq(vop->irq); /* To balance out the disable_irq above */
return ret;
}
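/*
 * A minimal sketch, mirroring the change above, of the register restore
 * pattern that replaces memcpy(): I/O memory is written one 32-bit register
 * at a time, since memcpy() gives no guarantee about access width or
 * ordering on MMIO. The names below are illustrative only.
 */
#include <linux/io.h>
#include <linux/types.h>

static void example_vop_restore_regs(void __iomem *regs, const u32 *regsbak,
				     unsigned int len)
{
	unsigned int i;

	/* replay the shadow copy register by register */
	for (i = 0; i < len; i += sizeof(u32))
		writel_relaxed(regsbak[i / 4], regs + i);
}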
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 092ade4ff6a5..9bad54f3de38 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -42,6 +42,56 @@ static const u32 sunxi_rgb2yuv_coef[12] = {
0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
};
+/*
+ * These coefficients are taken from the A33 BSP from Allwinner.
+ *
+ * The per-component formulas are given below; in the table each
+ * coefficient is multiplied by 1024 and each constant by 16:
+ * G = 1.164 * Y - 0.391 * U - 0.813 * V + 135
+ * R = 1.164 * Y + 1.596 * V - 222
+ * B = 1.164 * Y + 2.018 * U - 276
+ *
+ * This seems to be a conversion from Y[16:235] UV[16:240] to RGB[0:255],
+ * following the BT601 spec.
+ */
+static const u32 sunxi_bt601_yuv2rgb_coef[12] = {
+ 0x000004a7, 0x00001e6f, 0x00001cbf, 0x00000877,
+ 0x000004a7, 0x00000000, 0x00000662, 0x00003211,
+ 0x000004a7, 0x00000812, 0x00000000, 0x00002eb1,
+};
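/*
 * Sketch of the coefficient encoding assumed by the table above; the field
 * width here is an assumption, not taken from a datasheet. Negative
 * coefficients appear to be stored as two's-complement values truncated to
 * the field, e.g. (-401) & 0x1fff == 0x1e6f and (-833) & 0x1fff == 0x1cbf,
 * matching the G<-U and G<-V entries.
 */
static inline u32 example_sunxi_csc_coef(int coef_times_1024)
{
	/* hypothetical helper: pack a signed, 1024-scaled coefficient */
	return ((u32)coef_times_1024) & 0x1fff;
}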
+
+static inline bool sun4i_backend_format_is_planar_yuv(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV444:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline bool sun4i_backend_format_is_packed_yuv422(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static inline bool sun4i_backend_format_is_yuv(uint32_t format)
+{
+ return sun4i_backend_format_is_planar_yuv(format) ||
+ sun4i_backend_format_is_packed_yuv422(format);
+}
+
static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
{
int i;
@@ -166,6 +216,61 @@ int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
return 0;
}
+static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
+ int layer, struct drm_plane *plane)
+{
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ uint32_t format = fb->format->format;
+ u32 val = SUN4I_BACKEND_IYUVCTL_EN;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sunxi_bt601_yuv2rgb_coef); i++)
+ regmap_write(backend->engine.regs,
+ SUN4I_BACKEND_YGCOEF_REG(i),
+ sunxi_bt601_yuv2rgb_coef[i]);
+
+ /*
+ * We should do that only for a single plane, but the
+ * framebuffer's atomic_check has our back on this.
+ */
+ regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN,
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN);
+
+ /* TODO: Add support for the multi-planar YUV formats */
+ if (sun4i_backend_format_is_packed_yuv422(format))
+ val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422;
+ else
+ DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", format);
+
+ /*
+ * Allwinner seems to list the pixel sequence from right to left, while
+ * DRM lists it from left to right.
+ */
+ switch (format) {
+ case DRM_FORMAT_YUYV:
+ val |= SUN4I_BACKEND_IYUVCTL_FBPS_VYUY;
+ break;
+ case DRM_FORMAT_YVYU:
+ val |= SUN4I_BACKEND_IYUVCTL_FBPS_UYVY;
+ break;
+ case DRM_FORMAT_UYVY:
+ val |= SUN4I_BACKEND_IYUVCTL_FBPS_YVYU;
+ break;
+ case DRM_FORMAT_VYUY:
+ val |= SUN4I_BACKEND_IYUVCTL_FBPS_YUYV;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Unsupported YUV pixel sequence (0x%x)\n",
+ format);
+ }
+
+ regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVCTL_REG, val);
+
+ return 0;
+}
+
int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
int layer, struct drm_plane *plane)
{
@@ -175,6 +280,10 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
u32 val;
int ret;
+ /* Clear the YUV mode */
+ regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
+ SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
+
if (plane->state->crtc)
interlaced = plane->state->crtc->state->adjusted_mode.flags
& DRM_MODE_FLAG_INTERLACE;
@@ -186,6 +295,9 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
interlaced ? "on" : "off");
+ if (sun4i_backend_format_is_yuv(fb->format->format))
+ return sun4i_backend_update_yuv_format(backend, layer, plane);
+
ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
if (ret) {
DRM_DEBUG_DRIVER("Invalid format\n");
@@ -223,6 +335,21 @@ int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
return 0;
}
+static int sun4i_backend_update_yuv_buffer(struct sun4i_backend *backend,
+ struct drm_framebuffer *fb,
+ dma_addr_t paddr)
+{
+ /* TODO: Add support for the multi-planar YUV formats */
+ DRM_DEBUG_DRIVER("Setting packed YUV buffer address to %pad\n", &paddr);
+ regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVADD_REG(0), paddr);
+
+ DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
+ regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVLINEWIDTH_REG(0),
+ fb->pitches[0] * 8);
+
+ return 0;
+}
+
int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
int layer, struct drm_plane *plane)
{
@@ -248,6 +375,9 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
*/
paddr -= PHYS_OFFSET;
+ if (sun4i_backend_format_is_yuv(fb->format->format))
+ return sun4i_backend_update_yuv_buffer(backend, fb, paddr);
+
/* Write the 32 lower bits of the address (in bits) */
lo_paddr = paddr << 3;
DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
@@ -330,6 +460,7 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
unsigned int num_planes = 0;
unsigned int num_alpha_planes = 0;
unsigned int num_frontend_planes = 0;
+ unsigned int num_yuv_planes = 0;
unsigned int current_pipe = 0;
unsigned int i;
@@ -362,6 +493,11 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
if (fb->format->has_alpha)
num_alpha_planes++;
+ if (sun4i_backend_format_is_yuv(fb->format->format)) {
+ DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
+ num_yuv_planes++;
+ }
+
DRM_DEBUG_DRIVER("Plane zpos is %d\n",
plane_state->normalized_zpos);
@@ -430,13 +566,20 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
s_state->pipe = current_pipe;
}
+ /* We can only have a single YUV plane at a time */
+ if (num_yuv_planes > SUN4I_BACKEND_NUM_YUV_PLANES) {
+ DRM_DEBUG_DRIVER("Too many planes with YUV, rejecting...\n");
+ return -EINVAL;
+ }
+
if (num_frontend_planes > SUN4I_BACKEND_NUM_FRONTEND_LAYERS) {
DRM_DEBUG_DRIVER("Too many planes going through the frontend, rejecting\n");
return -EINVAL;
}
- DRM_DEBUG_DRIVER("State valid with %u planes, %u alpha, %u video\n",
- num_planes, num_alpha_planes, num_frontend_planes);
+ DRM_DEBUG_DRIVER("State valid with %u planes, %u alpha, %u video, %u YUV\n",
+ num_planes, num_alpha_planes, num_frontend_planes,
+ num_yuv_planes);
return 0;
}
@@ -793,6 +936,9 @@ static const struct sun4i_backend_quirks sun7i_backend_quirks = {
static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
};
+static const struct sun4i_backend_quirks sun9i_backend_quirks = {
+};
+
static const struct of_device_id sun4i_backend_of_table[] = {
{
.compatible = "allwinner,sun4i-a10-display-backend",
@@ -814,6 +960,10 @@ static const struct of_device_id sun4i_backend_of_table[] = {
.compatible = "allwinner,sun8i-a33-display-backend",
.data = &sun8i_a33_backend_quirks,
},
+ {
+ .compatible = "allwinner,sun9i-a80-display-backend",
+ .data = &sun9i_backend_quirks,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
index 52e77591186a..316f2179e9e1 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -72,6 +72,7 @@
#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(x) ((x) << 15)
#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK GENMASK(11, 10)
#define SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(x) ((x) << 10)
+#define SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN BIT(2)
#define SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN BIT(1)
#define SUN4I_BACKEND_ATTCTL_REG1(l) (0x8a0 + (0x4 * (l)))
@@ -110,7 +111,23 @@
#define SUN4I_BACKEND_SPREN_REG 0x900
#define SUN4I_BACKEND_SPRFMTCTL_REG 0x908
#define SUN4I_BACKEND_SPRALPHACTL_REG 0x90c
+
#define SUN4I_BACKEND_IYUVCTL_REG 0x920
+#define SUN4I_BACKEND_IYUVCTL_FBFMT_MASK GENMASK(14, 12)
+#define SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV444 (4 << 12)
+#define SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422 (3 << 12)
+#define SUN4I_BACKEND_IYUVCTL_FBFMT_PLANAR_YUV444 (2 << 12)
+#define SUN4I_BACKEND_IYUVCTL_FBFMT_PLANAR_YUV222 (1 << 12)
+#define SUN4I_BACKEND_IYUVCTL_FBFMT_PLANAR_YUV111 (0 << 12)
+#define SUN4I_BACKEND_IYUVCTL_FBPS_MASK GENMASK(9, 8)
+#define SUN4I_BACKEND_IYUVCTL_FBPS_YVYU (3 << 8)
+#define SUN4I_BACKEND_IYUVCTL_FBPS_VYUY (2 << 8)
+#define SUN4I_BACKEND_IYUVCTL_FBPS_YUYV (1 << 8)
+#define SUN4I_BACKEND_IYUVCTL_FBPS_UYVY (0 << 8)
+#define SUN4I_BACKEND_IYUVCTL_FBPS_VUYA (1 << 8)
+#define SUN4I_BACKEND_IYUVCTL_FBPS_AYUV (0 << 8)
+#define SUN4I_BACKEND_IYUVCTL_EN BIT(0)
+
#define SUN4I_BACKEND_IYUVADD_REG(c) (0x930 + (0x4 * (c)))
#define SUN4I_BACKEND_IYUVLINEWIDTH_REG(c) (0x940 + (0x4 * (c)))
@@ -149,6 +166,7 @@
#define SUN4I_BACKEND_NUM_LAYERS 4
#define SUN4I_BACKEND_NUM_ALPHA_LAYERS 1
#define SUN4I_BACKEND_NUM_FRONTEND_LAYERS 1
+#define SUN4I_BACKEND_NUM_YUV_PLANES 1
struct sun4i_backend {
struct sunxi_engine engine;
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index a0f43b81c64c..7f0705ef9f4e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -176,7 +176,13 @@ static bool sun4i_drv_node_is_frontend(struct device_node *node)
of_device_is_compatible(node, "allwinner,sun5i-a13-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun6i-a31-display-frontend") ||
of_device_is_compatible(node, "allwinner,sun7i-a20-display-frontend") ||
- of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend");
+ of_device_is_compatible(node, "allwinner,sun8i-a33-display-frontend") ||
+ of_device_is_compatible(node, "allwinner,sun9i-a80-display-frontend");
+}
+
+static bool sun4i_drv_node_is_deu(struct device_node *node)
+{
+ return of_device_is_compatible(node, "allwinner,sun9i-a80-deu");
}
static bool sun4i_drv_node_is_supported_frontend(struct device_node *node)
@@ -257,7 +263,8 @@ static int sun4i_drv_add_endpoints(struct device *dev,
* enabled frontend supported by the driver, we add it to our
* component list.
*/
- if (!sun4i_drv_node_is_frontend(node) ||
+ if (!(sun4i_drv_node_is_frontend(node) ||
+ sun4i_drv_node_is_deu(node)) ||
(sun4i_drv_node_is_supported_frontend(node) &&
of_device_is_available(node))) {
/* Add current component */
@@ -361,6 +368,7 @@ static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun8i-a83t-display-engine" },
{ .compatible = "allwinner,sun8i-h3-display-engine" },
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
+ { .compatible = "allwinner,sun9i-a80-display-engine" },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 33ad377569ec..2949a3c912c1 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -134,7 +134,11 @@ static const uint32_t sun4i_backend_layer_formats[] = {
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGB888,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
};
static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index be3f14d7746d..bffff4c9fbf5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -94,9 +94,64 @@ static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
}
}
+static enum drm_mode_status sun4i_lvds_encoder_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
+{
+ struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
+ struct sun4i_tcon *tcon = lvds->tcon;
+ u32 hsync = mode->hsync_end - mode->hsync_start;
+ u32 vsync = mode->vsync_end - mode->vsync_start;
+ unsigned long rate = mode->clock * 1000;
+ long rounded_rate;
+
+ DRM_DEBUG_DRIVER("Validating modes...\n");
+
+ if (hsync < 1)
+ return MODE_HSYNC_NARROW;
+
+ if (hsync > 0x3ff)
+ return MODE_HSYNC_WIDE;
+
+ if ((mode->hdisplay < 1) || (mode->htotal < 1))
+ return MODE_H_ILLEGAL;
+
+ if ((mode->hdisplay > 0x7ff) || (mode->htotal > 0xfff))
+ return MODE_BAD_HVALUE;
+
+ DRM_DEBUG_DRIVER("Horizontal parameters OK\n");
+
+ if (vsync < 1)
+ return MODE_VSYNC_NARROW;
+
+ if (vsync > 0x3ff)
+ return MODE_VSYNC_WIDE;
+
+ if ((mode->vdisplay < 1) || (mode->vtotal < 1))
+ return MODE_V_ILLEGAL;
+
+ if ((mode->vdisplay > 0x7ff) || (mode->vtotal > 0xfff))
+ return MODE_BAD_VVALUE;
+
+ DRM_DEBUG_DRIVER("Vertical parameters OK\n");
+
+ tcon->dclk_min_div = 7;
+ tcon->dclk_max_div = 7;
+ rounded_rate = clk_round_rate(tcon->dclk, rate);
+ if (rounded_rate < rate)
+ return MODE_CLOCK_LOW;
+
+ if (rounded_rate > rate)
+ return MODE_CLOCK_HIGH;
+
+ DRM_DEBUG_DRIVER("Clock rate OK\n");
+
+ return MODE_OK;
+}
+
static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
.disable = sun4i_lvds_encoder_disable,
.enable = sun4i_lvds_encoder_enable,
+ .mode_valid = sun4i_lvds_encoder_mode_valid,
};
static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 832f8f9bc47f..a2a697a099e6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -52,10 +52,10 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
return drm_panel_get_modes(tcon->panel);
}
-static int sun4i_rgb_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode)
{
- struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
+ struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
struct sun4i_tcon *tcon = rgb->tcon;
u32 hsync = mode->hsync_end - mode->hsync_start;
u32 vsync = mode->vsync_end - mode->vsync_start;
@@ -106,7 +106,6 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = {
.get_modes = sun4i_rgb_get_modes,
- .mode_valid = sun4i_rgb_mode_valid,
};
static void
@@ -156,6 +155,7 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
.disable = sun4i_rgb_encoder_disable,
.enable = sun4i_rgb_encoder_enable,
+ .mode_valid = sun4i_rgb_mode_valid,
};
static void sun4i_rgb_enc_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 0d6c5ed44795..1a114e380f13 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -17,6 +17,7 @@
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
#include <uapi/drm/drm_mode.h>
@@ -343,6 +344,9 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
const struct drm_display_mode *mode)
{
+ struct drm_panel *panel = tcon->panel;
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_info display_info = connector->display_info;
unsigned int bp, hsync, vsync;
u8 clk_delay;
u32 val = 0;
@@ -400,6 +404,27 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+ /*
+ * On A20 and similar SoCs, the only way to achieve a Positive Edge
+ * (Rising Edge) is to set the dclk clock phase to 2/3 (240°).
+ * By default the TCON works on the Negative Edge (Falling Edge),
+ * which is why the phase is set to 0 in that case.
+ * Unfortunately there is no way to logically invert dclk through
+ * the IO_POL register.
+ * The only approach that works, triple-checked with a scope, is to
+ * use a clock phase of 0° for the Negative Edge and 240° for the
+ * Positive Edge.
+ * On A33 and similar SoCs there would be a 90° phase option, but it
+ * also divides dclk by 2.
+ * The code below avoids spreading quirks all around the TCON and
+ * DOTCLOCK drivers.
+ */
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+ clk_set_phase(tcon->dclk, 240);
+
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ clk_set_phase(tcon->dclk, 0);
+
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
val);
@@ -850,6 +875,7 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
struct sunxi_engine *engine;
struct device_node *remote;
struct sun4i_tcon *tcon;
+ struct reset_control *edp_rstc;
bool has_lvds_rst, has_lvds_alt, can_lvds;
int ret;
@@ -874,6 +900,20 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
return PTR_ERR(tcon->lcd_rst);
}
+ if (tcon->quirks->needs_edp_reset) {
+ edp_rstc = devm_reset_control_get_shared(dev, "edp");
+ if (IS_ERR(edp_rstc)) {
+ dev_err(dev, "Couldn't get edp reset line\n");
+ return PTR_ERR(edp_rstc);
+ }
+
+ ret = reset_control_deassert(edp_rstc);
+ if (ret) {
+ dev_err(dev, "Couldn't deassert edp reset line\n");
+ return ret;
+ }
+ }
+
/* Make sure our TCON is reset */
ret = reset_control_reset(tcon->lcd_rst);
if (ret) {
@@ -1166,6 +1206,16 @@ static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
.has_channel_0 = true,
};
+static const struct sun4i_tcon_quirks sun9i_a80_tcon_lcd_quirks = {
+ .has_channel_0 = true,
+ .needs_edp_reset = true,
+};
+
+static const struct sun4i_tcon_quirks sun9i_a80_tcon_tv_quirks = {
+ .has_channel_1 = true,
+ .needs_edp_reset = true,
+};
+
/* sun4i_drv uses this list to check if a device node is a TCON */
const struct of_device_id sun4i_tcon_of_table[] = {
{ .compatible = "allwinner,sun4i-a10-tcon", .data = &sun4i_a10_quirks },
@@ -1177,6 +1227,8 @@ const struct of_device_id sun4i_tcon_of_table[] = {
{ .compatible = "allwinner,sun8i-a83t-tcon-lcd", .data = &sun8i_a83t_lcd_quirks },
{ .compatible = "allwinner,sun8i-a83t-tcon-tv", .data = &sun8i_a83t_tv_quirks },
{ .compatible = "allwinner,sun8i-v3s-tcon", .data = &sun8i_v3s_quirks },
+ { .compatible = "allwinner,sun9i-a80-tcon-lcd", .data = &sun9i_a80_tcon_lcd_quirks },
+ { .compatible = "allwinner,sun9i-a80-tcon-tv", .data = &sun9i_a80_tcon_tv_quirks },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_tcon_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 78d55e7cd2b3..d3a945b7bb60 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -176,6 +176,7 @@ struct sun4i_tcon_quirks {
bool has_channel_1; /* a33 does not have channel 1 */
bool has_lvds_alt; /* Does the LVDS clock have a parent other than the TCON clock? */
bool needs_de_be_mux; /* sun6i needs mux to select backend */
+ bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
/* callback to handle tcon muxing options */
int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
index 09bba853e2a4..b5e071a49045 100644
--- a/drivers/gpu/drm/sun4i/sun6i_drc.c
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -101,6 +101,7 @@ static const struct of_device_id sun6i_drc_of_table[] = {
{ .compatible = "allwinner,sun6i-a31-drc" },
{ .compatible = "allwinner,sun6i-a31s-drc" },
{ .compatible = "allwinner,sun8i-a33-drc" },
+ { .compatible = "allwinner,sun9i-a80-drc" },
{ }
};
MODULE_DEVICE_TABLE(of, sun6i_drc_of_table);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 49df2db2ad46..71152776b04c 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -383,6 +383,12 @@ static const u32 tegra20_primary_formats[] = {
DRM_FORMAT_XRGB8888,
};
+static const u64 tegra20_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED,
+ DRM_FORMAT_MOD_INVALID
+};
+
static const u32 tegra114_primary_formats[] = {
DRM_FORMAT_ARGB4444,
DRM_FORMAT_ARGB1555,
@@ -430,6 +436,17 @@ static const u32 tegra124_primary_formats[] = {
DRM_FORMAT_BGRX8888,
};
+static const u64 tegra124_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
+ DRM_FORMAT_MOD_INVALID
+};
+
static int tegra_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -596,6 +613,7 @@ static struct drm_plane *tegra_primary_plane_create(struct drm_device *drm,
enum drm_plane_type type = DRM_PLANE_TYPE_PRIMARY;
struct tegra_plane *plane;
unsigned int num_formats;
+ const u64 *modifiers;
const u32 *formats;
int err;
@@ -610,10 +628,11 @@ static struct drm_plane *tegra_primary_plane_create(struct drm_device *drm,
num_formats = dc->soc->num_primary_formats;
formats = dc->soc->primary_formats;
+ modifiers = dc->soc->modifiers;
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
- num_formats, NULL, type, NULL);
+ num_formats, modifiers, type, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
@@ -864,11 +883,13 @@ static const u32 tegra124_overlay_formats[] = {
static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
struct tegra_dc *dc,
- unsigned int index)
+ unsigned int index,
+ bool cursor)
{
unsigned long possible_crtcs = tegra_plane_get_possible_crtcs(drm);
struct tegra_plane *plane;
unsigned int num_formats;
+ enum drm_plane_type type;
const u32 *formats;
int err;
@@ -883,10 +904,14 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
num_formats = dc->soc->num_overlay_formats;
formats = dc->soc->overlay_formats;
+ if (!cursor)
+ type = DRM_PLANE_TYPE_OVERLAY;
+ else
+ type = DRM_PLANE_TYPE_CURSOR;
+
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
- num_formats, NULL,
- DRM_PLANE_TYPE_OVERLAY, NULL);
+ num_formats, NULL, type, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
@@ -938,6 +963,7 @@ static struct drm_plane *tegra_dc_add_planes(struct drm_device *drm,
struct tegra_dc *dc)
{
struct drm_plane *planes[2], *primary;
+ unsigned int planes_num;
unsigned int i;
int err;
@@ -945,8 +971,14 @@ static struct drm_plane *tegra_dc_add_planes(struct drm_device *drm,
if (IS_ERR(primary))
return primary;
- for (i = 0; i < 2; i++) {
- planes[i] = tegra_dc_overlay_plane_create(drm, dc, 1 + i);
+ if (dc->soc->supports_cursor)
+ planes_num = 2;
+ else
+ planes_num = 1;
+
+ for (i = 0; i < planes_num; i++) {
+ planes[i] = tegra_dc_overlay_plane_create(drm, dc, 1 + i,
+ false);
if (IS_ERR(planes[i])) {
err = PTR_ERR(planes[i]);
@@ -1704,31 +1736,6 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
drm_crtc_vblank_on(crtc);
}
-static int tegra_crtc_atomic_check(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- struct tegra_atomic_state *s = to_tegra_atomic_state(state->state);
- struct tegra_dc_state *tegra = to_dc_state(state);
-
- /*
- * The display hub display clock needs to be fed by the display clock
- * with the highest frequency to ensure proper functioning of all the
- * displays.
- *
- * Note that this isn't used before Tegra186, but it doesn't hurt and
- * conditionalizing it would make the code less clean.
- */
- if (state->active) {
- if (!s->clk_disp || tegra->pclk > s->rate) {
- s->dc = to_tegra_dc(crtc);
- s->clk_disp = s->dc->clk;
- s->rate = tegra->pclk;
- }
- }
-
- return 0;
-}
-
static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -1765,7 +1772,6 @@ static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
}
static const struct drm_crtc_helper_funcs tegra_crtc_helper_funcs = {
- .atomic_check = tegra_crtc_atomic_check,
.atomic_begin = tegra_crtc_atomic_begin,
.atomic_flush = tegra_crtc_atomic_flush,
.atomic_enable = tegra_crtc_atomic_enable,
@@ -1864,6 +1870,13 @@ static int tegra_dc_init(struct host1x_client *client)
err = PTR_ERR(cursor);
goto cleanup;
}
+ } else {
+ /* dedicate one overlay to mouse cursor */
+ cursor = tegra_dc_overlay_plane_create(drm, dc, 2, true);
+ if (IS_ERR(cursor)) {
+ err = PTR_ERR(cursor);
+ goto cleanup;
+ }
}
err = drm_crtc_init_with_planes(drm, &dc->base, primary, cursor,
@@ -1954,6 +1967,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
.primary_formats = tegra20_primary_formats,
.num_overlay_formats = ARRAY_SIZE(tegra20_overlay_formats),
.overlay_formats = tegra20_overlay_formats,
+ .modifiers = tegra20_modifiers,
};
static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
@@ -1970,6 +1984,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
.primary_formats = tegra20_primary_formats,
.num_overlay_formats = ARRAY_SIZE(tegra20_overlay_formats),
.overlay_formats = tegra20_overlay_formats,
+ .modifiers = tegra20_modifiers,
};
static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
@@ -1986,6 +2001,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
.primary_formats = tegra114_primary_formats,
.num_overlay_formats = ARRAY_SIZE(tegra114_overlay_formats),
.overlay_formats = tegra114_overlay_formats,
+ .modifiers = tegra20_modifiers,
};
static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
@@ -2002,6 +2018,7 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
.primary_formats = tegra114_primary_formats,
.num_overlay_formats = ARRAY_SIZE(tegra124_overlay_formats),
.overlay_formats = tegra114_overlay_formats,
+ .modifiers = tegra124_modifiers,
};
static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
@@ -2018,6 +2035,7 @@ static const struct tegra_dc_soc_info tegra210_dc_soc_info = {
.primary_formats = tegra114_primary_formats,
.num_overlay_formats = ARRAY_SIZE(tegra114_overlay_formats),
.overlay_formats = tegra114_overlay_formats,
+ .modifiers = tegra124_modifiers,
};
static const struct tegra_windowgroup_soc tegra186_dc_wgrps[] = {
diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h
index 096a81ad6d8d..d2b50d32de4d 100644
--- a/drivers/gpu/drm/tegra/dc.h
+++ b/drivers/gpu/drm/tegra/dc.h
@@ -66,6 +66,7 @@ struct tegra_dc_soc_info {
unsigned int num_primary_formats;
const u32 *overlay_formats;
unsigned int num_overlay_formats;
+ const u64 *modifiers;
};
struct tegra_dc {
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index d50bddb2e447..e20e013151f0 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -42,6 +42,10 @@ static int tegra_atomic_check(struct drm_device *drm,
if (err < 0)
return err;
+ err = tegra_display_hub_atomic_check(drm, state);
+ if (err < 0)
+ return err;
+
err = drm_atomic_normalize_zpos(drm, state);
if (err < 0)
return err;
@@ -56,35 +60,6 @@ static int tegra_atomic_check(struct drm_device *drm,
return 0;
}
-static struct drm_atomic_state *
-tegra_atomic_state_alloc(struct drm_device *drm)
-{
- struct tegra_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
-
- if (!state || drm_atomic_state_init(drm, &state->base) < 0) {
- kfree(state);
- return NULL;
- }
-
- return &state->base;
-}
-
-static void tegra_atomic_state_clear(struct drm_atomic_state *state)
-{
- struct tegra_atomic_state *tegra = to_tegra_atomic_state(state);
-
- drm_atomic_state_default_clear(state);
- tegra->clk_disp = NULL;
- tegra->dc = NULL;
- tegra->rate = 0;
-}
-
-static void tegra_atomic_state_free(struct drm_atomic_state *state)
-{
- drm_atomic_state_default_release(state);
- kfree(state);
-}
-
static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -92,9 +67,6 @@ static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
#endif
.atomic_check = tegra_atomic_check,
.atomic_commit = drm_atomic_helper_commit,
- .atomic_state_alloc = tegra_atomic_state_alloc,
- .atomic_state_clear = tegra_atomic_state_clear,
- .atomic_state_free = tegra_atomic_state_free,
};
static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index 73b661ce7086..4f41aaec8530 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -42,20 +42,6 @@ struct tegra_fbdev {
};
#endif
-struct tegra_atomic_state {
- struct drm_atomic_state base;
-
- struct clk *clk_disp;
- struct tegra_dc *dc;
- unsigned long rate;
-};
-
-static inline struct tegra_atomic_state *
-to_tegra_atomic_state(struct drm_atomic_state *state)
-{
- return container_of(state, struct tegra_atomic_state, base);
-}
-
struct tegra_drm {
struct drm_device *drm;
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 001cb77e2f59..e69434909a42 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -55,6 +55,11 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
uint64_t modifier = fb->base.modifier;
switch (modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ tiling->mode = TEGRA_BO_TILING_MODE_PITCH;
+ tiling->value = 0;
+ break;
+
case DRM_FORMAT_MOD_NVIDIA_TEGRA_TILED:
tiling->mode = TEGRA_BO_TILING_MODE_TILED;
tiling->value = 0;
@@ -91,9 +96,7 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
break;
default:
- /* TODO: handle YUV formats? */
- *tiling = fb->planes[0]->tiling;
- break;
+ return -EINVAL;
}
return 0;
@@ -224,12 +227,28 @@ unreference:
}
#ifdef CONFIG_DRM_FBDEV_EMULATION
+static int tegra_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct tegra_bo *bo;
+ int err;
+
+ bo = tegra_fb_get_plane(helper->fb, 0);
+
+ err = drm_gem_mmap_obj(&bo->gem, bo->gem.size, vma);
+ if (err < 0)
+ return err;
+
+ return __tegra_gem_mmap(&bo->gem, vma);
+}
+
static struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
DRM_FB_HELPER_DEFAULT_OPS,
.fb_fillrect = drm_fb_helper_sys_fillrect,
.fb_copyarea = drm_fb_helper_sys_copyarea,
.fb_imageblit = drm_fb_helper_sys_imageblit,
+ .fb_mmap = tegra_fb_mmap,
};
static int tegra_fbdev_probe(struct drm_fb_helper *helper,
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 49b9bf28f872..8b0b4ff64bb4 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -203,6 +203,8 @@ free:
static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
if (bo->pages) {
+ dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
+ DMA_BIDIRECTIONAL);
drm_gem_put_pages(&bo->gem, bo->pages, true, true);
sg_free_table(bo->sgt);
kfree(bo->sgt);
@@ -213,8 +215,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
- struct scatterlist *s;
- unsigned int i;
+ int err;
bo->pages = drm_gem_get_pages(&bo->gem);
if (IS_ERR(bo->pages))
@@ -223,27 +224,26 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
bo->num_pages = bo->gem.size >> PAGE_SHIFT;
bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
- if (IS_ERR(bo->sgt))
+ if (IS_ERR(bo->sgt)) {
+ err = PTR_ERR(bo->sgt);
goto put_pages;
+ }
- /*
- * Fake up the SG table so that dma_sync_sg_for_device() can be used
- * to flush the pages associated with it.
- *
- * TODO: Replace this by drm_clflash_sg() once it can be implemented
- * without relying on symbols that are not exported.
- */
- for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
- sg_dma_address(s) = sg_phys(s);
-
- dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
- DMA_TO_DEVICE);
+ err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
+ DMA_BIDIRECTIONAL);
+ if (err == 0) {
+ err = -EFAULT;
+ goto free_sgt;
+ }
return 0;
+free_sgt:
+ sg_free_table(bo->sgt);
+ kfree(bo->sgt);
put_pages:
drm_gem_put_pages(&bo->gem, bo->pages, false, false);
- return PTR_ERR(bo->sgt);
+ return err;
}
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
@@ -459,8 +459,7 @@ const struct vm_operations_struct tegra_bo_vm_ops = {
.close = drm_gem_vm_close,
};
-static int tegra_gem_mmap(struct drm_gem_object *gem,
- struct vm_area_struct *vma)
+int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma)
{
struct tegra_bo *bo = to_tegra_bo(gem);
@@ -507,7 +506,7 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
gem = vma->vm_private_data;
- return tegra_gem_mmap(gem, vma);
+ return __tegra_gem_mmap(gem, vma);
}
static struct sg_table *
@@ -569,6 +568,34 @@ static void tegra_gem_prime_release(struct dma_buf *buf)
drm_gem_dmabuf_release(buf);
}
+static int tegra_gem_prime_begin_cpu_access(struct dma_buf *buf,
+ enum dma_data_direction direction)
+{
+ struct drm_gem_object *gem = buf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+ struct drm_device *drm = gem->dev;
+
+ if (bo->pages)
+ dma_sync_sg_for_cpu(drm->dev, bo->sgt->sgl, bo->sgt->nents,
+ DMA_FROM_DEVICE);
+
+ return 0;
+}
+
+static int tegra_gem_prime_end_cpu_access(struct dma_buf *buf,
+ enum dma_data_direction direction)
+{
+ struct drm_gem_object *gem = buf->priv;
+ struct tegra_bo *bo = to_tegra_bo(gem);
+ struct drm_device *drm = gem->dev;
+
+ if (bo->pages)
+ dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
+ DMA_TO_DEVICE);
+
+ return 0;
+}
+
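/*
 * A minimal sketch, not taken from the driver, of how a dma-buf importer is
 * expected to bracket CPU access so that the begin/end_cpu_access hooks
 * added above can sync the pages. Error handling is trimmed; example_cpu_read
 * is a placeholder name.
 */
#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/string.h>

static void example_cpu_read(struct dma_buf *buf, void *dst, size_t len)
{
	void *vaddr;

	if (dma_buf_begin_cpu_access(buf, DMA_FROM_DEVICE))	/* -> dma_sync_sg_for_cpu() */
		return;

	vaddr = dma_buf_vmap(buf);
	if (vaddr) {
		memcpy(dst, vaddr, len);
		dma_buf_vunmap(buf, vaddr);
	}

	dma_buf_end_cpu_access(buf, DMA_FROM_DEVICE);		/* -> dma_sync_sg_for_device() */
}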
static void *tegra_gem_prime_kmap_atomic(struct dma_buf *buf,
unsigned long page)
{
@@ -600,7 +627,7 @@ static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
if (err < 0)
return err;
- return tegra_gem_mmap(gem, vma);
+ return __tegra_gem_mmap(gem, vma);
}
static void *tegra_gem_prime_vmap(struct dma_buf *buf)
@@ -619,6 +646,8 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
.map_dma_buf = tegra_gem_prime_map_dma_buf,
.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
.release = tegra_gem_prime_release,
+ .begin_cpu_access = tegra_gem_prime_begin_cpu_access,
+ .end_cpu_access = tegra_gem_prime_end_cpu_access,
.map_atomic = tegra_gem_prime_kmap_atomic,
.unmap_atomic = tegra_gem_prime_kunmap_atomic,
.map = tegra_gem_prime_kmap,
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 8eb9fd24ef0e..6bd7dd7e55b4 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -68,10 +68,11 @@ void tegra_bo_free_object(struct drm_gem_object *gem);
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args);
-int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
-
extern const struct vm_operations_struct tegra_bo_vm_ops;
+int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma);
+int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
+
struct dma_buf *tegra_gem_prime_export(struct drm_device *drm,
struct drm_gem_object *gem,
int flags);
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index e10a47d57313..9a3f23d4780f 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -49,6 +49,17 @@ static const u32 tegra_shared_plane_formats[] = {
DRM_FORMAT_YUV422,
};
+static const u64 tegra_shared_plane_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
+ DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
+ DRM_FORMAT_MOD_INVALID
+};
+
static inline unsigned int tegra_plane_offset(struct tegra_plane *plane,
unsigned int offset)
{
@@ -527,6 +538,7 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
unsigned int possible_crtcs = 0x7;
struct tegra_shared_plane *plane;
unsigned int num_formats;
+ const u64 *modifiers;
struct drm_plane *p;
const u32 *formats;
int err;
@@ -545,10 +557,11 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
num_formats = ARRAY_SIZE(tegra_shared_plane_formats);
formats = tegra_shared_plane_formats;
+ modifiers = tegra_shared_plane_modifiers;
err = drm_universal_plane_init(drm, p, possible_crtcs,
&tegra_plane_funcs, formats,
- num_formats, NULL, type, NULL);
+ num_formats, modifiers, type, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
@@ -560,6 +573,89 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
return p;
}
+static struct drm_private_state *
+tegra_display_hub_duplicate_state(struct drm_private_obj *obj)
+{
+ struct tegra_display_hub_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void tegra_display_hub_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct tegra_display_hub_state *hub_state =
+ to_tegra_display_hub_state(state);
+
+ kfree(hub_state);
+}
+
+static const struct drm_private_state_funcs tegra_display_hub_state_funcs = {
+ .atomic_duplicate_state = tegra_display_hub_duplicate_state,
+ .atomic_destroy_state = tegra_display_hub_destroy_state,
+};
+
+static struct tegra_display_hub_state *
+tegra_display_hub_get_state(struct tegra_display_hub *hub,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = dev_get_drvdata(hub->client.parent);
+ struct drm_private_state *priv;
+
+ WARN_ON(!drm_modeset_is_locked(&drm->mode_config.connection_mutex));
+
+ priv = drm_atomic_get_private_obj_state(state, &hub->base);
+ if (IS_ERR(priv))
+ return ERR_CAST(priv);
+
+ return to_tegra_display_hub_state(priv);
+}
+
+int tegra_display_hub_atomic_check(struct drm_device *drm,
+ struct drm_atomic_state *state)
+{
+ struct tegra_drm *tegra = drm->dev_private;
+ struct tegra_display_hub_state *hub_state;
+ struct drm_crtc_state *old, *new;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ if (!tegra->hub)
+ return 0;
+
+ hub_state = tegra_display_hub_get_state(tegra->hub, state);
+ if (IS_ERR(hub_state))
+ return PTR_ERR(hub_state);
+
+ /*
+ * The display hub display clock needs to be fed by the display clock
+ * with the highest frequency to ensure proper functioning of all the
+ * displays.
+ *
+ * Note that this isn't used before Tegra186, but it doesn't hurt and
+ * conditionalizing it would make the code less clean.
+ */
+ for_each_oldnew_crtc_in_state(state, crtc, old, new, i) {
+ struct tegra_dc_state *dc = to_dc_state(new);
+
+ if (new->active) {
+ if (!hub_state->clk || dc->pclk > hub_state->rate) {
+ hub_state->dc = to_tegra_dc(dc->base.crtc);
+ hub_state->clk = hub_state->dc->clk;
+ hub_state->rate = dc->pclk;
+ }
+ }
+ }
+
+ return 0;
+}
+
static void tegra_display_hub_update(struct tegra_dc *dc)
{
u32 value;
@@ -585,26 +681,28 @@ static void tegra_display_hub_update(struct tegra_dc *dc)
void tegra_display_hub_atomic_commit(struct drm_device *drm,
struct drm_atomic_state *state)
{
- struct tegra_atomic_state *s = to_tegra_atomic_state(state);
struct tegra_drm *tegra = drm->dev_private;
struct tegra_display_hub *hub = tegra->hub;
+ struct tegra_display_hub_state *hub_state;
struct device *dev = hub->client.dev;
int err;
- if (s->clk_disp) {
- err = clk_set_rate(s->clk_disp, s->rate);
+ hub_state = tegra_display_hub_get_state(hub, state);
+
+ if (hub_state->clk) {
+ err = clk_set_rate(hub_state->clk, hub_state->rate);
if (err < 0)
dev_err(dev, "failed to set rate of %pC to %lu Hz\n",
- s->clk_disp, s->rate);
+ hub_state->clk, hub_state->rate);
- err = clk_set_parent(hub->clk_disp, s->clk_disp);
+ err = clk_set_parent(hub->clk_disp, hub_state->clk);
if (err < 0)
dev_err(dev, "failed to set parent of %pC to %pC: %d\n",
- hub->clk_disp, s->clk_disp, err);
+ hub->clk_disp, hub_state->clk, err);
}
- if (s->dc)
- tegra_display_hub_update(s->dc);
+ if (hub_state->dc)
+ tegra_display_hub_update(hub_state->dc);
}
static int tegra_display_hub_init(struct host1x_client *client)
@@ -612,6 +710,14 @@ static int tegra_display_hub_init(struct host1x_client *client)
struct tegra_display_hub *hub = to_tegra_display_hub(client);
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = drm->dev_private;
+ struct tegra_display_hub_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ drm_atomic_private_obj_init(&hub->base, &state->base,
+ &tegra_display_hub_state_funcs);
tegra->hub = hub;
@@ -623,6 +729,7 @@ static int tegra_display_hub_exit(struct host1x_client *client)
struct drm_device *drm = dev_get_drvdata(client->parent);
struct tegra_drm *tegra = drm->dev_private;
+ drm_atomic_private_obj_fini(&tegra->hub->base);
tegra->hub = NULL;
return 0;
diff --git a/drivers/gpu/drm/tegra/hub.h b/drivers/gpu/drm/tegra/hub.h
index 890a47cd05c3..85b8bf41a395 100644
--- a/drivers/gpu/drm/tegra/hub.h
+++ b/drivers/gpu/drm/tegra/hub.h
@@ -41,6 +41,7 @@ struct tegra_display_hub_soc {
};
struct tegra_display_hub {
+ struct drm_private_obj base;
struct host1x_client client;
struct clk *clk_disp;
struct clk *clk_dsc;
@@ -57,6 +58,20 @@ to_tegra_display_hub(struct host1x_client *client)
return container_of(client, struct tegra_display_hub, client);
}
+struct tegra_display_hub_state {
+ struct drm_private_state base;
+
+ struct tegra_dc *dc;
+ unsigned long rate;
+ struct clk *clk;
+};
+
+static inline struct tegra_display_hub_state *
+to_tegra_display_hub_state(struct drm_private_state *priv)
+{
+ return container_of(priv, struct tegra_display_hub_state, base);
+}
+
struct tegra_dc;
struct tegra_plane;
@@ -68,6 +83,8 @@ struct drm_plane *tegra_shared_plane_create(struct drm_device *drm,
unsigned int wgrp,
unsigned int index);
+int tegra_display_hub_atomic_check(struct drm_device *drm,
+ struct drm_atomic_state *state);
void tegra_display_hub_atomic_commit(struct drm_device *drm,
struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index a056fbf83b53..6d6e2d0091eb 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -68,6 +68,21 @@ static void tegra_plane_atomic_destroy_state(struct drm_plane *plane,
kfree(state);
}
+static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
+ uint32_t format,
+ uint64_t modifier)
+{
+ const struct drm_format_info *info = drm_format_info(format);
+
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ if (info->num_planes == 1)
+ return true;
+
+ return false;
+}
+
const struct drm_plane_funcs tegra_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -75,6 +90,7 @@ const struct drm_plane_funcs tegra_plane_funcs = {
.reset = tegra_plane_reset,
.atomic_duplicate_state = tegra_plane_atomic_duplicate_state,
.atomic_destroy_state = tegra_plane_atomic_destroy_state,
+ .format_mod_supported = tegra_plane_format_mod_supported,
};
int tegra_plane_state_add(struct tegra_plane *plane,
@@ -296,8 +312,8 @@ int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha)
return -EINVAL;
}
-unsigned int tegra_plane_get_overlap_index(struct tegra_plane *plane,
- struct tegra_plane *other)
+static unsigned int tegra_plane_get_overlap_index(struct tegra_plane *plane,
+ struct tegra_plane *other)
{
unsigned int index = 0, i;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1f730b3f18e5..2ebbae6067ab 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -255,6 +255,54 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
return 0;
}
+#ifdef CONFIG_X86
+#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
+#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
+#else
+#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
+#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
+#endif
+
+
+/**
+ * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
+ * specified page protection.
+ *
+ * @page: The page to map.
+ * @prot: The page protection.
+ *
+ * This function maps a TTM page using the kmap_atomic api if available,
+ * otherwise falls back to vmap. The user must make sure that the
+ * specified page does not have an aliased mapping with a different caching
+ * policy unless the architecture explicitly allows it. Also mapping and
+ * unmapping using this api must be correctly nested. Unmapping should
+ * occur in the reverse order of mapping.
+ */
+void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+ if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
+ return kmap_atomic(page);
+ else
+ return __ttm_kmap_atomic_prot(page, prot);
+}
+EXPORT_SYMBOL(ttm_kmap_atomic_prot);
+
+/**
+ * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
+ * ttm_kmap_atomic_prot.
+ *
+ * @addr: The virtual address from the map.
+ * @prot: The page protection.
+ */
+void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
+{
+ if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
+ kunmap_atomic(addr);
+ else
+ __ttm_kunmap_atomic(addr);
+}
+EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
+
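A brief usage sketch (not part of this patch) illustrating the nesting rule from the comment above; mem, dst_page and src_page are assumed locals, and error handling is elided (kmap_atomic cannot fail, the vmap fallback can):

	pgprot_t prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
	void *dst = ttm_kmap_atomic_prot(dst_page, prot);
	void *src = ttm_kmap_atomic_prot(src_page, prot);

	memcpy(dst, src, PAGE_SIZE);

	/* Unmap in the reverse order of mapping. */
	ttm_kunmap_atomic_prot(src, prot);
	ttm_kunmap_atomic_prot(dst, prot);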
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
pgprot_t prot)
@@ -266,28 +314,13 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
return -ENOMEM;
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
-
-#ifdef CONFIG_X86
- dst = kmap_atomic_prot(d, prot);
-#else
- if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
- dst = vmap(&d, 1, 0, prot);
- else
- dst = kmap(d);
-#endif
+ dst = ttm_kmap_atomic_prot(d, prot);
if (!dst)
return -ENOMEM;
memcpy_fromio(dst, src, PAGE_SIZE);
-#ifdef CONFIG_X86
- kunmap_atomic(dst);
-#else
- if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
- vunmap(dst);
- else
- kunmap(d);
-#endif
+ ttm_kunmap_atomic_prot(dst, prot);
return 0;
}
@@ -303,27 +336,13 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
-#ifdef CONFIG_X86
- src = kmap_atomic_prot(s, prot);
-#else
- if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
- src = vmap(&s, 1, 0, prot);
- else
- src = kmap(s);
-#endif
+ src = ttm_kmap_atomic_prot(s, prot);
if (!src)
return -ENOMEM;
memcpy_toio(dst, src, PAGE_SIZE);
-#ifdef CONFIG_X86
- kunmap_atomic(src);
-#else
- if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
- vunmap(src);
- else
- kunmap(s);
-#endif
+ ttm_kunmap_atomic_prot(src, prot);
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index ce1e3b9e14c9..bf4667481935 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -643,9 +643,12 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
struct drm_plane *plane;
+ struct vc4_plane_state *vc4_plane_state;
bool debug_dump_regs = false;
+ bool enable_bg_fill = false;
u32 __iomem *dlist_start = vc4->hvs->dlist + vc4_state->mm.start;
u32 __iomem *dlist_next = dlist_start;
@@ -656,6 +659,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
/* Copy all the active planes' dlist contents to the hardware dlist. */
drm_atomic_crtc_for_each_plane(plane, crtc) {
+ /* Is this the first active plane? */
+ if (dlist_next == dlist_start) {
+ /* We need to enable background fill when a plane
+ * could be alpha blending from the background, i.e.
+ * where no other plane is underneath. It suffices to
+ * consider the first active plane here since we set
+ * needs_bg_fill such that either the first plane
+ * already needs it or all planes on top blend from
+ * the first or a lower plane.
+ */
+ vc4_plane_state = to_vc4_plane_state(plane->state);
+ enable_bg_fill = vc4_plane_state->needs_bg_fill;
+ }
+
dlist_next += vc4_plane_write_dlist(plane, dlist_next);
}
@@ -664,6 +681,14 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc,
WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size);
+ if (enable_bg_fill)
+ /* This sets a black background color fill, as is the case
+ * with other DRM drivers.
+ */
+ HVS_WRITE(SCALER_DISPBKGNDX(vc4_crtc->channel),
+ HVS_READ(SCALER_DISPBKGNDX(vc4_crtc->channel)) |
+ SCALER_DISPBKGND_FILL);
+
/* Only update DISPLIST if the CRTC was already running and is not
* being disabled.
* vc4_crtc_enable() takes care of updating the dlist just after
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index fefa1664a9f5..1b4cd1fabf56 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -310,6 +310,66 @@ to_vc4_plane(struct drm_plane *plane)
return (struct vc4_plane *)plane;
}
+enum vc4_scaling_mode {
+ VC4_SCALING_NONE,
+ VC4_SCALING_TPZ,
+ VC4_SCALING_PPF,
+};
+
+struct vc4_plane_state {
+ struct drm_plane_state base;
+ /* System memory copy of the display list for this element, computed
+ * at atomic_check time.
+ */
+ u32 *dlist;
+ u32 dlist_size; /* Number of dwords allocated for the display list */
+ u32 dlist_count; /* Number of used dwords in the display list. */
+
+ /* Offset in the dlist to various words, for pageflip or
+ * cursor updates.
+ */
+ u32 pos0_offset;
+ u32 pos2_offset;
+ u32 ptr0_offset;
+
+ /* Offset where the plane's dlist was last stored in the
+ * hardware at vc4_crtc_atomic_flush() time.
+ */
+ u32 __iomem *hw_dlist;
+
+ /* Clipped coordinates of the plane on the display. */
+ int crtc_x, crtc_y, crtc_w, crtc_h;
+ /* Clipped area being scanned from in the FB. */
+ u32 src_x, src_y;
+
+ u32 src_w[2], src_h[2];
+
+ /* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
+ enum vc4_scaling_mode x_scaling[2], y_scaling[2];
+ bool is_unity;
+ bool is_yuv;
+
+ /* Offset to start scanning out from the start of the plane's
+ * BO.
+ */
+ u32 offsets[3];
+
+ /* Our allocation in LBM for temporary storage during scaling. */
+ struct drm_mm_node lbm;
+
+ /* Set when the plane has per-pixel alpha content or does not cover
+ * the entire screen. This is a hint to the CRTC that it might need
+ * to enable background color fill.
+ */
+ bool needs_bg_fill;
+};
+
+static inline struct vc4_plane_state *
+to_vc4_plane_state(struct drm_plane_state *state)
+{
+ return (struct vc4_plane_state *)state;
+}
+
enum vc4_encoder_type {
VC4_ENCODER_TYPE_NONE,
VC4_ENCODER_TYPE_HDMI,
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index c4c7af11fec5..ce39390be389 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -27,60 +27,6 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
-enum vc4_scaling_mode {
- VC4_SCALING_NONE,
- VC4_SCALING_TPZ,
- VC4_SCALING_PPF,
-};
-
-struct vc4_plane_state {
- struct drm_plane_state base;
- /* System memory copy of the display list for this element, computed
- * at atomic_check time.
- */
- u32 *dlist;
- u32 dlist_size; /* Number of dwords allocated for the display list */
- u32 dlist_count; /* Number of used dwords in the display list. */
-
- /* Offset in the dlist to various words, for pageflip or
- * cursor updates.
- */
- u32 pos0_offset;
- u32 pos2_offset;
- u32 ptr0_offset;
-
- /* Offset where the plane's dlist was last stored in the
- * hardware at vc4_crtc_atomic_flush() time.
- */
- u32 __iomem *hw_dlist;
-
- /* Clipped coordinates of the plane on the display. */
- int crtc_x, crtc_y, crtc_w, crtc_h;
- /* Clipped area being scanned from in the FB. */
- u32 src_x, src_y;
-
- u32 src_w[2], src_h[2];
-
- /* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
- enum vc4_scaling_mode x_scaling[2], y_scaling[2];
- bool is_unity;
- bool is_yuv;
-
- /* Offset to start scanning out from the start of the plane's
- * BO.
- */
- u32 offsets[3];
-
- /* Our allocation in LBM for temporary storage during scaling. */
- struct drm_mm_node lbm;
-};
-
-static inline struct vc4_plane_state *
-to_vc4_plane_state(struct drm_plane_state *state)
-{
- return (struct vc4_plane_state *)state;
-}
-
static const struct hvs_format {
u32 drm; /* DRM_FORMAT_* */
u32 hvs; /* HVS_FORMAT_* */
@@ -521,6 +467,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
u32 ctl0_offset = vc4_state->dlist_count;
const struct hvs_format *format = vc4_get_hvs_format(fb->format->format);
int num_planes = drm_format_num_planes(format->drm);
+ bool covers_screen;
u32 scl0, scl1, pitch0;
u32 lbm_size, tiling;
unsigned long irqflags;
@@ -618,13 +565,14 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
SCALER_POS1_SCL_HEIGHT));
}
- /* Position Word 2: Source Image Size, Alpha Mode */
+ /* Position Word 2: Source Image Size, Alpha */
vc4_state->pos2_offset = vc4_state->dlist_count;
vc4_dlist_write(vc4_state,
VC4_SET_FIELD(fb->format->has_alpha ?
SCALER_POS2_ALPHA_MODE_PIPELINE :
SCALER_POS2_ALPHA_MODE_FIXED,
SCALER_POS2_ALPHA_MODE) |
+ (fb->format->has_alpha ? SCALER_POS2_ALPHA_PREMULT : 0) |
VC4_SET_FIELD(vc4_state->src_w[0], SCALER_POS2_WIDTH) |
VC4_SET_FIELD(vc4_state->src_h[0], SCALER_POS2_HEIGHT));
@@ -700,6 +648,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
vc4_state->dlist[ctl0_offset] |=
VC4_SET_FIELD(vc4_state->dlist_count, SCALER_CTL0_SIZE);
+ /* crtc_* are already clipped coordinates. */
+ covers_screen = vc4_state->crtc_x == 0 && vc4_state->crtc_y == 0 &&
+ vc4_state->crtc_w == state->crtc->mode.hdisplay &&
+ vc4_state->crtc_h == state->crtc->mode.vdisplay;
+ /* Background fill might be necessary when the plane has per-pixel
+ * alpha content and blends from the background or does not cover
+ * the entire screen.
+ */
+ vc4_state->needs_bg_fill = fb->format->has_alpha || !covers_screen;
+
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index b9749cb24063..a141496104a6 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -848,6 +848,7 @@ enum hvs_pixel_format {
#define SCALER_POS2_ALPHA_MODE_FIXED 1
#define SCALER_POS2_ALPHA_MODE_FIXED_NONZERO 2
#define SCALER_POS2_ALPHA_MODE_FIXED_OVER_0x07 3
+#define SCALER_POS2_ALPHA_PREMULT BIT(29)
#define SCALER_POS2_HEIGHT_MASK VC4_MASK(27, 16)
#define SCALER_POS2_HEIGHT_SHIFT 16
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 2db485abb186..eec76af49f04 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -753,7 +753,7 @@ validate_gl_shader_rec(struct drm_device *dev,
28, /* cs */
};
uint32_t shader_reloc_count = ARRAY_SIZE(shader_reloc_offsets);
- struct drm_gem_cma_object *bo[shader_reloc_count + 8];
+ struct drm_gem_cma_object *bo[ARRAY_SIZE(shader_reloc_offsets) + 8];
uint32_t nr_attributes, nr_relocs, packet_size;
int i;
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index ad80211e1098..794cc9d5c9b0 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -7,6 +7,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
- vmwgfx_simple_resource.o vmwgfx_va.o
+ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index 6e0ccb70a700..88e72bf9a534 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -372,6 +372,14 @@ SVGAGuestPtr;
* PA, not biased by the offset. When the command buffer is finished
* the guest should not read the offset field as there is no guarantee
* what it will be set to.
+ *
+ * When the SVGA_CAP_HP_CMD_QUEUE cap bit is set, a new command queue
+ * SVGA_CB_CONTEXT_1 is available. Commands submitted to this queue
+ * will be executed as quickly as possible by the SVGA device,
+ * potentially before already queued commands on SVGA_CB_CONTEXT_0.
+ * The SVGA device guarantees that any command buffers submitted to
+ * SVGA_CB_CONTEXT_0 will be executed after any command buffers
+ * _already_ submitted to SVGA_CB_CONTEXT_1.
*/
#define SVGA_CB_MAX_SIZE (512 * 1024) /* 512 KB */
@@ -382,7 +390,8 @@ SVGAGuestPtr;
typedef enum {
SVGA_CB_CONTEXT_DEVICE = 0x3f,
SVGA_CB_CONTEXT_0 = 0x0,
- SVGA_CB_CONTEXT_MAX = 0x1,
+ SVGA_CB_CONTEXT_1 = 0x1, /* Supported with SVGA_CAP_HP_CMD_QUEUE */
+ SVGA_CB_CONTEXT_MAX = 0x2,
} SVGACBContext;
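For illustration only (not part of this patch), a driver that has read the capability register could pick the submission context like this; the capabilities variable is an assumed local:

	SVGACBContext cb_context = (capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
		SVGA_CB_CONTEXT_1 : SVGA_CB_CONTEXT_0;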
@@ -689,6 +698,7 @@ SVGASignedPoint;
#define SVGA_CAP_CMD_BUFFERS_2 0x04000000
#define SVGA_CAP_GBOBJECTS 0x08000000
#define SVGA_CAP_DX 0x10000000
+#define SVGA_CAP_HP_CMD_QUEUE 0x20000000
#define SVGA_CAP_CMD_RESERVED 0x80000000
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
new file mode 100644
index 000000000000..e8c94b19db7b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -0,0 +1,506 @@
+/**************************************************************************
+ *
+ * Copyright © 2017 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_drv.h"
+
+/*
+ * Template that implements find_first_diff() for a generic
+ * unsigned integer type. @size and return value are in bytes.
+ */
+#define VMW_FIND_FIRST_DIFF(_type) \
+static size_t vmw_find_first_diff_ ## _type \
+ (const _type * dst, const _type * src, size_t size)\
+{ \
+ size_t i; \
+ \
+ for (i = 0; i < size; i += sizeof(_type)) { \
+ if (*dst++ != *src++) \
+ break; \
+ } \
+ \
+ return i; \
+}
+
+
+/*
+ * Template that implements find_last_diff() for a generic
+ * unsigned integer type. Pointers point to the item following the
+ * *end* of the area to be examined. @size and return value are in
+ * bytes.
+ */
+#define VMW_FIND_LAST_DIFF(_type) \
+static ssize_t vmw_find_last_diff_ ## _type( \
+ const _type * dst, const _type * src, size_t size) \
+{ \
+ while (size) { \
+ if (*--dst != *--src) \
+ break; \
+ \
+ size -= sizeof(_type); \
+ } \
+ return size; \
+}
+
+
+/*
+ * Instantiate find diff functions for relevant unsigned integer sizes,
+ * assuming that wider integers are faster (including aligning) up to the
+ * architecture native width, which is assumed to be 32 bit unless
+ * CONFIG_64BIT is defined.
+ */
+VMW_FIND_FIRST_DIFF(u8);
+VMW_FIND_LAST_DIFF(u8);
+
+VMW_FIND_FIRST_DIFF(u16);
+VMW_FIND_LAST_DIFF(u16);
+
+VMW_FIND_FIRST_DIFF(u32);
+VMW_FIND_LAST_DIFF(u32);
+
+#ifdef CONFIG_64BIT
+VMW_FIND_FIRST_DIFF(u64);
+VMW_FIND_LAST_DIFF(u64);
+#endif
+
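For reference (not part of this patch), the u32 instantiation above expands to roughly the following:

static size_t vmw_find_first_diff_u32(const u32 *dst, const u32 *src,
				      size_t size)
{
	size_t i;

	for (i = 0; i < size; i += sizeof(u32)) {
		if (*dst++ != *src++)
			break;
	}

	return i;
}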
+
+/* We use size aligned copies. This computes (addr - align(addr)) */
+#define SPILL(_var, _type) ((unsigned long) _var & (sizeof(_type) - 1))
+
+
+/*
+ * Template to compute find_first_diff() for a certain integer type
+ * including a head find for alignment, and adjustment of parameters
+ * for tail find or increased resolution find using an unsigned integer find
+ * of smaller width. If finding is complete, and resolution is sufficient,
+ * the macro executes a return statement. Otherwise it falls through.
+ */
+#define VMW_TRY_FIND_FIRST_DIFF(_type) \
+do { \
+ unsigned int spill = SPILL(dst, _type); \
+ size_t diff_offs; \
+ \
+ if (spill && spill == SPILL(src, _type) && \
+ sizeof(_type) - spill <= size) { \
+ spill = sizeof(_type) - spill; \
+ diff_offs = vmw_find_first_diff_u8(dst, src, spill); \
+ if (diff_offs < spill) \
+ return round_down(offset + diff_offs, granularity); \
+ \
+ dst += spill; \
+ src += spill; \
+ size -= spill; \
+ offset += spill; \
+ spill = 0; \
+ } \
+ if (!spill && !SPILL(src, _type)) { \
+ size_t to_copy = size & ~(sizeof(_type) - 1); \
+ \
+ diff_offs = vmw_find_first_diff_ ## _type \
+ ((_type *) dst, (_type *) src, to_copy); \
+ if (diff_offs >= size || granularity == sizeof(_type)) \
+ return (offset + diff_offs); \
+ \
+ dst += diff_offs; \
+ src += diff_offs; \
+ size -= diff_offs; \
+ offset += diff_offs; \
+ } \
+} while (0) \
+
+
+/**
+ * vmw_find_first_diff - find the first difference between dst and src
+ *
+ * @dst: The destination address
+ * @src: The source address
+ * @size: Number of bytes to compare
+ * @granularity: The granularity needed for the return value in bytes.
+ * return: The offset from find start where the first difference was
+ * encountered in bytes. If no difference was found, the function returns
+ * a value >= @size.
+ */
+static size_t vmw_find_first_diff(const u8 *dst, const u8 *src, size_t size,
+ size_t granularity)
+{
+ size_t offset = 0;
+
+ /*
+ * Try finding with large integers if alignment allows, or we can
+ * fix it. Fall through if we need better resolution or alignment
+ * was bad.
+ */
+#ifdef CONFIG_64BIT
+ VMW_TRY_FIND_FIRST_DIFF(u64);
+#endif
+ VMW_TRY_FIND_FIRST_DIFF(u32);
+ VMW_TRY_FIND_FIRST_DIFF(u16);
+
+ return round_down(offset + vmw_find_first_diff_u8(dst, src, size),
+ granularity);
+}
+
+
+/*
+ * Template to compute find_last_diff() for a certain integer type
+ * including a tail find for alignment, and adjustment of parameters
+ * for head find or increased resolution find using an unsigned integer find
+ * of smaller width. If finding is complete, and resolution is sufficient,
+ * the macro executes a return statement. Otherwise it falls through.
+ */
+#define VMW_TRY_FIND_LAST_DIFF(_type) \
+do { \
+ unsigned int spill = SPILL(dst, _type); \
+ ssize_t location; \
+ ssize_t diff_offs; \
+ \
+ if (spill && spill <= size && spill == SPILL(src, _type)) { \
+ diff_offs = vmw_find_last_diff_u8(dst, src, spill); \
+ if (diff_offs) { \
+ location = size - spill + diff_offs - 1; \
+ return round_down(location, granularity); \
+ } \
+ \
+ dst -= spill; \
+ src -= spill; \
+ size -= spill; \
+ spill = 0; \
+ } \
+ if (!spill && !SPILL(src, _type)) { \
+ size_t to_copy = round_down(size, sizeof(_type)); \
+ \
+ diff_offs = vmw_find_last_diff_ ## _type \
+ ((_type *) dst, (_type *) src, to_copy); \
+ location = size - to_copy + diff_offs - sizeof(_type); \
+ if (location < 0 || granularity == sizeof(_type)) \
+ return location; \
+ \
+ dst -= to_copy - diff_offs; \
+ src -= to_copy - diff_offs; \
+ size -= to_copy - diff_offs; \
+ } \
+} while (0)
+
+
+/**
+ * vmw_find_last_diff - find the last difference between dst and src
+ *
+ * @dst: The destination address
+ * @src: The source address
+ * @size: Number of bytes to compare
+ * @granularity: The granularity needed for the return value in bytes.
+ * return: The offset from find start where the last difference was
+ * encountered in bytes, or a negative value if no difference was found.
+ */
+static ssize_t vmw_find_last_diff(const u8 *dst, const u8 *src, size_t size,
+ size_t granularity)
+{
+ dst += size;
+ src += size;
+
+#ifdef CONFIG_64BIT
+ VMW_TRY_FIND_LAST_DIFF(u64);
+#endif
+ VMW_TRY_FIND_LAST_DIFF(u32);
+ VMW_TRY_FIND_LAST_DIFF(u16);
+
+ return round_down(vmw_find_last_diff_u8(dst, src, size) - 1,
+ granularity);
+}
+
+
+/**
+ * vmw_memcpy - A wrapper around kernel memcpy that allows plugging it into a
+ * struct vmw_diff_cpy.
+ *
+ * @diff: The struct vmw_diff_cpy closure argument (unused).
+ * @dest: The copy destination.
+ * @src: The copy source.
+ * @n: Number of bytes to copy.
+ */
+void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n)
+{
+ memcpy(dest, src, n);
+}
+
+
+/**
+ * vmw_adjust_rect - Adjust rectangle coordinates for newly found difference
+ *
+ * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
+ * @diff_offs: The offset from @diff->line_offset where the difference was
+ * found.
+ */
+static void vmw_adjust_rect(struct vmw_diff_cpy *diff, size_t diff_offs)
+{
+ size_t offs = (diff_offs + diff->line_offset) / diff->cpp;
+ struct drm_rect *rect = &diff->rect;
+
+ rect->x1 = min_t(int, rect->x1, offs);
+ rect->x2 = max_t(int, rect->x2, offs + 1);
+ rect->y1 = min_t(int, rect->y1, diff->line);
+ rect->y2 = max_t(int, rect->y2, diff->line + 1);
+}
+
+/**
+ * vmw_diff_memcpy - memcpy that creates a bounding box of modified content.
+ *
+ * @diff: The struct vmw_diff_cpy used to track the modified bounding box.
+ * @dest: The copy destination.
+ * @src: The copy source.
+ * @n: Number of bytes to copy.
+ *
+ * In order to correctly track the modified content, the field @diff->line must
+ * be pre-loaded with the current line number, the field @diff->line_offset must
+ * be pre-loaded with the line offset in bytes where the copy starts, and
+ * finally the field @diff->cpp needs to be pre-loaded with the number of bytes
+ * per unit in the horizontal direction of the area we're examining.
+ * Typically bytes per pixel.
+ * This is needed to know the needed granularity of the difference computing
+ * operations. A higher cpp generally leads to faster execution at the cost of
+ * bounding box width precision.
+ */
+void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
+ size_t n)
+{
+ ssize_t csize, byte_len;
+
+ if (WARN_ON_ONCE(round_down(n, diff->cpp) != n))
+ return;
+
+ /* TODO: Possibly use a single vmw_find_first_diff per line? */
+ csize = vmw_find_first_diff(dest, src, n, diff->cpp);
+ if (csize < n) {
+ vmw_adjust_rect(diff, csize);
+ byte_len = diff->cpp;
+
+ /*
+ * Starting from where first difference was found, find
+ * location of last difference, and then copy.
+ */
+ diff->line_offset += csize;
+ dest += csize;
+ src += csize;
+ n -= csize;
+ csize = vmw_find_last_diff(dest, src, n, diff->cpp);
+ if (csize >= 0) {
+ byte_len += csize;
+ vmw_adjust_rect(diff, csize);
+ }
+ memcpy(dest, src, byte_len);
+ }
+ diff->line_offset += n;
+}
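A minimal sketch (not part of this patch) of the pre-loading described above; diff is assumed to have been set up with VMW_CPU_BLIT_DIFF_INITIALIZER(cpp), and line, x_start, dst, src and seg_bytes are hypothetical locals of a per-line loop:

	diff.line = line;			/* current scanline */
	diff.line_offset = x_start * diff.cpp;	/* byte offset into that line */
	/* seg_bytes must be a multiple of diff.cpp, see the WARN_ON above. */
	vmw_diff_memcpy(&diff, dst, src, seg_bytes);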
+
+/**
+ * struct vmw_bo_blit_line_data - Convenience argument to vmw_bo_cpu_blit_line
+ *
+ * @mapped_dst: Already mapped destination page index in @dst_pages.
+ * @dst_addr: Kernel virtual address of mapped destination page.
+ * @dst_pages: Array of destination bo pages.
+ * @dst_num_pages: Number of destination bo pages.
+ * @dst_prot: Destination bo page protection.
+ * @mapped_src: Already mapped source page index in @src_pages.
+ * @src_addr: Kernel virtual address of mapped source page.
+ * @src_pages: Array of source bo pages.
+ * @src_num_pages: Number of source bo pages.
+ * @src_prot: Source bo page protection.
+ * @diff: Struct vmw_diff_cpy, in the end forwarded to the memcpy routine.
+ */
+struct vmw_bo_blit_line_data {
+ u32 mapped_dst;
+ u8 *dst_addr;
+ struct page **dst_pages;
+ u32 dst_num_pages;
+ pgprot_t dst_prot;
+ u32 mapped_src;
+ u8 *src_addr;
+ struct page **src_pages;
+ u32 src_num_pages;
+ pgprot_t src_prot;
+ struct vmw_diff_cpy *diff;
+};
+
+/**
+ * vmw_bo_cpu_blit_line - Blit part of a line from one bo to another.
+ *
+ * @d: Blit data as described above.
+ * @dst_offset: Destination copy start offset from start of bo.
+ * @src_offset: Source copy start offset from start of bo.
+ * @bytes_to_copy: Number of bytes to copy in this line.
+ */
+static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
+ u32 dst_offset,
+ u32 src_offset,
+ u32 bytes_to_copy)
+{
+ struct vmw_diff_cpy *diff = d->diff;
+
+ while (bytes_to_copy) {
+ u32 copy_size = bytes_to_copy;
+ u32 dst_page = dst_offset >> PAGE_SHIFT;
+ u32 src_page = src_offset >> PAGE_SHIFT;
+ u32 dst_page_offset = dst_offset & ~PAGE_MASK;
+ u32 src_page_offset = src_offset & ~PAGE_MASK;
+ bool unmap_dst = d->dst_addr && dst_page != d->mapped_dst;
+ bool unmap_src = d->src_addr && (src_page != d->mapped_src ||
+ unmap_dst);
+
+ copy_size = min_t(u32, copy_size, PAGE_SIZE - dst_page_offset);
+ copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);
+
+ if (unmap_src) {
+ ttm_kunmap_atomic_prot(d->src_addr, d->src_prot);
+ d->src_addr = NULL;
+ }
+
+ if (unmap_dst) {
+ ttm_kunmap_atomic_prot(d->dst_addr, d->dst_prot);
+ d->dst_addr = NULL;
+ }
+
+ if (!d->dst_addr) {
+ if (WARN_ON_ONCE(dst_page >= d->dst_num_pages))
+ return -EINVAL;
+
+ d->dst_addr =
+ ttm_kmap_atomic_prot(d->dst_pages[dst_page],
+ d->dst_prot);
+ if (!d->dst_addr)
+ return -ENOMEM;
+
+ d->mapped_dst = dst_page;
+ }
+
+ if (!d->src_addr) {
+ if (WARN_ON_ONCE(src_page >= d->src_num_pages))
+ return -EINVAL;
+
+ d->src_addr =
+ ttm_kmap_atomic_prot(d->src_pages[src_page],
+ d->src_prot);
+ if (!d->src_addr)
+ return -ENOMEM;
+
+ d->mapped_src = src_page;
+ }
+ diff->do_cpy(diff, d->dst_addr + dst_page_offset,
+ d->src_addr + src_page_offset, copy_size);
+
+ bytes_to_copy -= copy_size;
+ dst_offset += copy_size;
+ src_offset += copy_size;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_bo_cpu_blit - in-kernel CPU blit.
+ *
+ * @dst: Destination buffer object.
+ * @dst_offset: Destination offset of blit start in bytes.
+ * @dst_stride: Destination stride in bytes.
+ * @src: Source buffer object.
+ * @src_offset: Source offset of blit start in bytes.
+ * @src_stride: Source stride in bytes.
+ * @w: Width of blit.
+ * @h: Height of blit.
+ * return: Zero on success. Negative error value on failure. Will print out
+ * kernel warnings on caller bugs.
+ *
+ * Performs a CPU blit from one buffer object to another, avoiding a full
+ * bo vmap which may exhaust or fragment vmalloc space.
+ * On supported architectures (x86), we're using kmap_atomic, which avoids
+ * cross-processor TLB and cache flushes and may, on non-HIGHMEM systems,
+ * reference already set-up mappings.
+ *
+ * Neither of the buffer objects may be placed in PCI memory
+ * (Fixed memory in TTM terminology) when using this function.
+ */
+int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+ u32 dst_offset, u32 dst_stride,
+ struct ttm_buffer_object *src,
+ u32 src_offset, u32 src_stride,
+ u32 w, u32 h,
+ struct vmw_diff_cpy *diff)
+{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+ u32 j, initial_line = dst_offset / dst_stride;
+ struct vmw_bo_blit_line_data d;
+ int ret = 0;
+
+ /* Buffer objects need to be either pinned or reserved: */
+ if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
+ lockdep_assert_held(&dst->resv->lock.base);
+ if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
+ lockdep_assert_held(&src->resv->lock.base);
+
+ if (dst->ttm->state == tt_unpopulated) {
+ ret = dst->ttm->bdev->driver->ttm_tt_populate(dst->ttm, &ctx);
+ if (ret)
+ return ret;
+ }
+
+ if (src->ttm->state == tt_unpopulated) {
+ ret = src->ttm->bdev->driver->ttm_tt_populate(src->ttm, &ctx);
+ if (ret)
+ return ret;
+ }
+
+ d.mapped_dst = 0;
+ d.mapped_src = 0;
+ d.dst_addr = NULL;
+ d.src_addr = NULL;
+ d.dst_pages = dst->ttm->pages;
+ d.src_pages = src->ttm->pages;
+ d.dst_num_pages = dst->num_pages;
+ d.src_num_pages = src->num_pages;
+ d.dst_prot = ttm_io_prot(dst->mem.placement, PAGE_KERNEL);
+ d.src_prot = ttm_io_prot(src->mem.placement, PAGE_KERNEL);
+ d.diff = diff;
+
+ for (j = 0; j < h; ++j) {
+ diff->line = j + initial_line;
+ diff->line_offset = dst_offset % dst_stride;
+ ret = vmw_bo_cpu_blit_line(&d, dst_offset, src_offset, w);
+ if (ret)
+ goto out;
+
+ dst_offset += dst_stride;
+ src_offset += src_stride;
+ }
+out:
+ if (d.src_addr)
+ ttm_kunmap_atomic_prot(d.src_addr, d.src_prot);
+ if (d.dst_addr)
+ ttm_kunmap_atomic_prot(d.dst_addr, d.dst_prot);
+
+ return ret;
+}
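A usage sketch (not part of this patch): a diff-aware blit between two reserved, populated buffer objects, assuming 4 bytes per pixel so that the width argument is given in bytes:

	struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(4);
	int ret;

	ret = vmw_bo_cpu_blit(dst_bo, 0, dst_stride,
			      src_bo, 0, src_stride,
			      width * 4, height, &diff);
	if (!ret && drm_rect_visible(&diff.rect)) {
		/*
		 * diff.rect now bounds the region that actually changed and
		 * can be used to limit the subsequent device update.
		 */
	}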
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 7177eecb8c9f..21111fd091f9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -185,6 +185,22 @@ static const struct ttm_place evictable_placement_flags[] = {
}
};
+static const struct ttm_place nonfixed_placement_flags[] = {
+ {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED
+ }, {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+ }, {
+ .fpfn = 0,
+ .lpfn = 0,
+ .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED
+ }
+};
+
struct ttm_placement vmw_evictable_placement = {
.num_placement = 4,
.placement = evictable_placement_flags,
@@ -213,6 +229,13 @@ struct ttm_placement vmw_mob_ne_placement = {
.busy_placement = &mob_ne_placement_flags
};
+struct ttm_placement vmw_nonfixed_placement = {
+ .num_placement = 3,
+ .placement = nonfixed_placement_flags,
+ .num_busy_placement = 1,
+ .busy_placement = &sys_placement_flags
+};
+
struct vmw_ttm_tt {
struct ttm_dma_tt dma_ttm;
struct vmw_private *dev_priv;
@@ -841,6 +864,7 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
*/
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
+ vmw_resource_swap_notify(bo);
(void) ttm_bo_wait(bo, false, false);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index f283324ce598..9f45d5004cae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -101,6 +101,7 @@ struct vmw_cmdbuf_context {
* @handle: DMA address handle for the command buffer space if @using_mob is
* false. Immutable.
* @size: The size of the command buffer space. Immutable.
+ * @num_contexts: Number of contexts actually enabled.
*/
struct vmw_cmdbuf_man {
struct mutex cur_mutex;
@@ -128,6 +129,7 @@ struct vmw_cmdbuf_man {
bool has_pool;
dma_addr_t handle;
size_t size;
+ u32 num_contexts;
};
/**
@@ -185,7 +187,7 @@ struct vmw_cmdbuf_alloc_info {
/* Loop over each context in the command buffer manager. */
#define for_each_cmdbuf_ctx(_man, _i, _ctx) \
- for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < SVGA_CB_CONTEXT_MAX; \
+ for (_i = 0, _ctx = &(_man)->ctx[0]; (_i) < (_man)->num_contexts; \
++(_i), ++(_ctx))
static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
@@ -514,6 +516,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
int i;
struct vmw_cmdbuf_context *ctx;
+ bool global_block = false;
for_each_cmdbuf_ctx(man, i, ctx) {
INIT_LIST_HEAD(&restart_head[i]);
@@ -531,6 +534,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
list_del_init(&entry->list);
restart[entry->cb_context] = true;
+ global_block = true;
if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
DRM_ERROR("Unknown command causing device error.\n");
@@ -564,23 +568,21 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
cb_hdr->length -= new_start_offset;
cb_hdr->errorOffset = 0;
cb_hdr->offset = 0;
+
list_add_tail(&entry->list, &restart_head[entry->cb_context]);
- man->ctx[entry->cb_context].block_submission = true;
}
+
+ for_each_cmdbuf_ctx(man, i, ctx)
+ man->ctx[i].block_submission = true;
+
spin_unlock(&man->lock);
- /* Preempt all contexts with errors */
- for_each_cmdbuf_ctx(man, i, ctx) {
- if (ctx->block_submission && vmw_cmdbuf_preempt(man, i))
- DRM_ERROR("Failed preempting command buffer "
- "context %u.\n", i);
- }
+ /* Preempt all contexts */
+ if (global_block && vmw_cmdbuf_preempt(man, 0))
+ DRM_ERROR("Failed preempting command buffer contexts\n");
spin_lock(&man->lock);
for_each_cmdbuf_ctx(man, i, ctx) {
- if (!ctx->block_submission)
- continue;
-
/* Move preempted command buffers to the preempted queue. */
vmw_cmdbuf_ctx_process(man, ctx, &dummy);
@@ -594,19 +596,16 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
* Finally add all command buffers first in the submitted
* queue, to rerun them.
*/
- list_splice_init(&restart_head[i], &ctx->submitted);
ctx->block_submission = false;
+ list_splice_init(&restart_head[i], &ctx->submitted);
}
vmw_cmdbuf_man_process(man);
spin_unlock(&man->lock);
- for_each_cmdbuf_ctx(man, i, ctx) {
- if (restart[i] && vmw_cmdbuf_startstop(man, i, true))
- DRM_ERROR("Failed restarting command buffer "
- "context %u.\n", i);
- }
+ if (global_block && vmw_cmdbuf_startstop(man, 0, true))
+ DRM_ERROR("Failed restarting command buffer contexts\n");
/* Send a new fence in case one was removed */
if (send_fence) {
@@ -1307,6 +1306,8 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
if (!man)
return ERR_PTR(-ENOMEM);
+ man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ?
+ 2 : 1;
man->headers = dma_pool_create("vmwgfx cmdbuf",
&dev_priv->dev->pdev->dev,
sizeof(SVGACBHeader),
@@ -1341,14 +1342,11 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv)
INIT_WORK(&man->work, &vmw_cmdbuf_work_func);
vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ERROR,
&dev_priv->error_waiters);
- for_each_cmdbuf_ctx(man, i, ctx) {
- ret = vmw_cmdbuf_startstop(man, i, true);
- if (ret) {
- DRM_ERROR("Failed starting command buffer "
- "context %u.\n", i);
- vmw_cmdbuf_man_destroy(man);
- return ERR_PTR(ret);
- }
+ ret = vmw_cmdbuf_startstop(man, 0, true);
+ if (ret) {
+ DRM_ERROR("Failed starting command buffer contexts\n");
+ vmw_cmdbuf_man_destroy(man);
+ return ERR_PTR(ret);
}
return man;
@@ -1398,16 +1396,11 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
*/
void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man)
{
- struct vmw_cmdbuf_context *ctx;
- unsigned int i;
-
WARN_ON_ONCE(man->has_pool);
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
- for_each_cmdbuf_ctx(man, i, ctx)
- if (vmw_cmdbuf_startstop(man, i, false))
- DRM_ERROR("Failed stopping command buffer "
- "context %u.\n", i);
+ if (vmw_cmdbuf_startstop(man, 0, false))
+ DRM_ERROR("Failed stopping command buffer contexts.\n");
vmw_generic_waiter_remove(man->dev_priv, SVGA_IRQFLAG_ERROR,
&man->dev_priv->error_waiters);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index d45d2caffa5a..d59d9dd16ebc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -323,3 +323,54 @@ void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
+
+
+/*
+ * vmw_dma_buffer_unmap - Tear down a cached buffer object map.
+ *
+ * @vbo: The buffer object whose map we are tearing down.
+ *
+ * This function tears down a cached map set up using
+ * vmw_dma_buffer_map_and_cache().
+ */
+void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo)
+{
+ if (vbo->map.bo == NULL)
+ return;
+
+ ttm_bo_kunmap(&vbo->map);
+}
+
+
+/*
+ * vmw_dma_buffer_map_and_cache - Map a buffer object and cache the map
+ *
+ * @vbo: The buffer object to map
+ * Return: A kernel virtual address or NULL if mapping failed.
+ *
+ * This function maps a buffer object into the kernel address space, or
+ * returns the virtual kernel address of an already existing map. The virtual
+ * address remains valid as long as the buffer object is pinned or reserved.
+ * The cached map is torn down on either
+ * 1) Buffer object move
+ * 2) Buffer object swapout
+ * 3) Buffer object destruction
+ *
+ */
+void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo)
+{
+ struct ttm_buffer_object *bo = &vbo->base;
+ bool not_used;
+ void *virtual;
+ int ret;
+
+ virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
+ if (virtual)
+ return virtual;
+
+ ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map);
+ if (ret)
+ DRM_ERROR("Buffer object map failed: %d.\n", ret);
+
+ return ttm_kmap_obj_virtual(&vbo->map, &not_used);
+}
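A usage sketch (not part of this patch); the buffer object is assumed to be reserved around the access, and the map is torn down implicitly on move, swapout or destruction as listed above:

	void *virtual;

	(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
	virtual = vmw_dma_buffer_map_and_cache(vbo);
	if (virtual)
		memset(virtual, 0, PAGE_SIZE);	/* touch the first page */
	ttm_bo_unreserve(&vbo->base);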
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 184340d486c3..61a03ac90f8c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -301,6 +301,8 @@ static void vmw_print_capabilities(uint32_t capabilities)
DRM_INFO(" Guest Backed Resources.\n");
if (capabilities & SVGA_CAP_DX)
DRM_INFO(" DX Features.\n");
+ if (capabilities & SVGA_CAP_HP_CMD_QUEUE)
+ DRM_INFO(" HP Command Queue.\n");
}
/**
@@ -1277,8 +1279,7 @@ static void vmw_master_drop(struct drm_device *dev,
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
ttm_vt_unlock(&dev_priv->fbdev_master.lock);
- if (dev_priv->enable_fb)
- vmw_fb_on(dev_priv);
+ vmw_fb_refresh(dev_priv);
}
/**
@@ -1368,28 +1369,23 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
switch (val) {
case PM_HIBERNATION_PREPARE:
- if (dev_priv->enable_fb)
- vmw_fb_off(dev_priv);
- ttm_suspend_lock(&dev_priv->reservation_sem);
-
/*
- * This empties VRAM and unbinds all GMR bindings.
- * Buffer contents is moved to swappable memory.
+ * Take the reservation sem in write mode, which will make sure
+ * there are no other processes holding a buffer object
+ * reservation, meaning we should be able to evict all buffer
+ * objects if needed.
+ * Once user-space processes have been frozen, we can release
+ * the lock again.
*/
- vmw_execbuf_release_pinned_bo(dev_priv);
- vmw_resource_evict_all(dev_priv);
- vmw_release_device_early(dev_priv);
- ttm_bo_swapout_all(&dev_priv->bdev);
- vmw_fence_fifo_down(dev_priv->fman);
+ ttm_suspend_lock(&dev_priv->reservation_sem);
+ dev_priv->suspend_locked = true;
break;
case PM_POST_HIBERNATION:
case PM_POST_RESTORE:
- vmw_fence_fifo_up(dev_priv->fman);
- ttm_suspend_unlock(&dev_priv->reservation_sem);
- if (dev_priv->enable_fb)
- vmw_fb_on(dev_priv);
- break;
- case PM_RESTORE_PREPARE:
+ if (READ_ONCE(dev_priv->suspend_locked)) {
+ dev_priv->suspend_locked = false;
+ ttm_suspend_unlock(&dev_priv->reservation_sem);
+ }
break;
default:
break;
@@ -1440,25 +1436,48 @@ static int vmw_pm_freeze(struct device *kdev)
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
+ int ret;
- dev_priv->suspended = true;
+ /*
+ * Unlock for vmw_kms_suspend.
+ * No user-space processes should be running now.
+ */
+ ttm_suspend_unlock(&dev_priv->reservation_sem);
+ ret = vmw_kms_suspend(dev_priv->dev);
+ if (ret) {
+ ttm_suspend_lock(&dev_priv->reservation_sem);
+ DRM_ERROR("Failed to freeze modesetting.\n");
+ return ret;
+ }
if (dev_priv->enable_fb)
- vmw_fifo_resource_dec(dev_priv);
+ vmw_fb_off(dev_priv);
+ ttm_suspend_lock(&dev_priv->reservation_sem);
+ vmw_execbuf_release_pinned_bo(dev_priv);
+ vmw_resource_evict_all(dev_priv);
+ vmw_release_device_early(dev_priv);
+ ttm_bo_swapout_all(&dev_priv->bdev);
+ if (dev_priv->enable_fb)
+ vmw_fifo_resource_dec(dev_priv);
if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
DRM_ERROR("Can't hibernate while 3D resources are active.\n");
if (dev_priv->enable_fb)
vmw_fifo_resource_inc(dev_priv);
WARN_ON(vmw_request_device_late(dev_priv));
- dev_priv->suspended = false;
+ dev_priv->suspend_locked = false;
+ ttm_suspend_unlock(&dev_priv->reservation_sem);
+ if (dev_priv->suspend_state)
+ vmw_kms_resume(dev);
+ if (dev_priv->enable_fb)
+ vmw_fb_on(dev_priv);
+ vmw_fb_refresh(dev_priv);
return -EBUSY;
}
- if (dev_priv->enable_fb)
- __vmw_svga_disable(dev_priv);
+ vmw_fence_fifo_down(dev_priv->fman);
+ __vmw_svga_disable(dev_priv);
vmw_release_device_late(dev_priv);
-
return 0;
}
@@ -1482,7 +1501,16 @@ static int vmw_pm_restore(struct device *kdev)
if (dev_priv->enable_fb)
__vmw_svga_enable(dev_priv);
- dev_priv->suspended = false;
+ vmw_fence_fifo_up(dev_priv->fman);
+ dev_priv->suspend_locked = false;
+ ttm_suspend_unlock(&dev_priv->reservation_sem);
+ if (dev_priv->suspend_state)
+ vmw_kms_resume(dev_priv->dev);
+
+ if (dev_priv->enable_fb)
+ vmw_fb_on(dev_priv);
+
+ vmw_fb_refresh(dev_priv);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d08753e8fd94..9e60de95b863 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -43,10 +43,10 @@
#include <linux/sync_file.h>
#define VMWGFX_DRIVER_NAME "vmwgfx"
-#define VMWGFX_DRIVER_DATE "20170612"
+#define VMWGFX_DRIVER_DATE "20180322"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 14
-#define VMWGFX_DRIVER_PATCHLEVEL 0
+#define VMWGFX_DRIVER_PATCHLEVEL 1
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
@@ -92,6 +92,8 @@ struct vmw_dma_buffer {
s32 pin_count;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
+ /* Protected by reservation */
+ struct ttm_bo_kmap_obj map;
};
/**
@@ -423,6 +425,7 @@ struct vmw_private {
struct vmw_framebuffer *implicit_fb;
struct mutex global_kms_state_mutex;
spinlock_t cursor_lock;
+ struct drm_atomic_state *suspend_state;
/*
* Context and surface management.
@@ -494,8 +497,8 @@ struct vmw_private {
struct vmw_master *active_master;
struct vmw_master fbdev_master;
struct notifier_block pm_nb;
- bool suspended;
bool refuse_hibernation;
+ bool suspend_locked;
struct mutex release_mutex;
atomic_t num_fifo_resources;
@@ -673,11 +676,13 @@ extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
+extern void vmw_resource_swap_notify(struct ttm_buffer_object *bo);
extern int vmw_query_readback_all(struct vmw_dma_buffer *dx_query_mob);
extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence);
extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
+
/**
* DMA buffer helper routines - vmwgfx_dmabuf.c
*/
@@ -700,6 +705,8 @@ extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_dma_buffer *bo, bool pin);
+extern void *vmw_dma_buffer_map_and_cache(struct vmw_dma_buffer *vbo);
+extern void vmw_dma_buffer_unmap(struct vmw_dma_buffer *vbo);
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -766,6 +773,7 @@ extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
+extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
@@ -902,6 +910,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);
+void vmw_fb_refresh(struct vmw_private *vmw_priv);
/**
* Kernel modesetting - vmwgfx_kms.c
@@ -938,6 +947,8 @@ int vmw_kms_present(struct vmw_private *dev_priv,
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
+int vmw_kms_suspend(struct drm_device *dev);
+int vmw_kms_resume(struct drm_device *dev);
int vmw_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
@@ -1165,6 +1176,53 @@ extern int vmw_cmdbuf_cur_flush(struct vmw_cmdbuf_man *man,
bool interruptible);
extern void vmw_cmdbuf_irqthread(struct vmw_cmdbuf_man *man);
+/* CPU blit utilities - vmwgfx_blit.c */
+
+/**
+ * struct vmw_diff_cpy - CPU blit information structure
+ *
+ * @rect: The output bounding box rectangle.
+ * @line: The current line of the blit.
+ * @line_offset: Offset of the current line segment.
+ * @cpp: Bytes per pixel (granularity information).
+ * @do_cpy: Which memcpy function to use.
+ */
+struct vmw_diff_cpy {
+ struct drm_rect rect;
+ size_t line;
+ size_t line_offset;
+ int cpp;
+ void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
+ size_t n);
+};
+
+#define VMW_CPU_BLIT_INITIALIZER { \
+ .do_cpy = vmw_memcpy, \
+}
+
+#define VMW_CPU_BLIT_DIFF_INITIALIZER(_cpp) { \
+ .line = 0, \
+ .line_offset = 0, \
+ .rect = { .x1 = INT_MAX/2, \
+ .y1 = INT_MAX/2, \
+ .x2 = INT_MIN/2, \
+ .y2 = INT_MIN/2 \
+ }, \
+ .cpp = _cpp, \
+ .do_cpy = vmw_diff_memcpy, \
+}
+
+void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
+ size_t n);
+
+void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);
+
+int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+ u32 dst_offset, u32 dst_stride,
+ struct ttm_buffer_object *src,
+ u32 src_offset, u32 src_stride,
+ u32 w, u32 h,
+ struct vmw_diff_cpy *diff);
/**
* Inline helper functions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index d23a18aae476..2582ffd36bb5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -43,8 +43,6 @@ struct vmw_fb_par {
struct mutex bo_mutex;
struct vmw_dma_buffer *vmw_bo;
- struct ttm_bo_kmap_obj map;
- void *bo_ptr;
unsigned bo_size;
struct drm_framebuffer *set_fb;
struct drm_display_mode *set_mode;
@@ -163,10 +161,17 @@ static int vmw_fb_blank(int blank, struct fb_info *info)
return 0;
}
-/*
- * Dirty code
+/**
+ * vmw_fb_dirty_flush - flush dirty regions to the kms framebuffer
+ *
+ * @work: The struct work_struct associated with this task.
+ *
+ * This function flushes the dirty regions of the vmalloc framebuffer to the
+ * kms framebuffer, and if the kms framebuffer is visible, also updated the
+ * corresponding displays. Note that this function runs even if the kms
+ * framebuffer is not bound to a crtc and thus not visible, but it's turned
+ * off during hibernation using the par->dirty.active bool.
*/
-
static void vmw_fb_dirty_flush(struct work_struct *work)
{
struct vmw_fb_par *par = container_of(work, struct vmw_fb_par,
@@ -174,13 +179,15 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
struct vmw_private *vmw_priv = par->vmw_priv;
struct fb_info *info = vmw_priv->fb_info;
unsigned long irq_flags;
- s32 dst_x1, dst_x2, dst_y1, dst_y2, w, h;
+ s32 dst_x1, dst_x2, dst_y1, dst_y2, w = 0, h = 0;
u32 cpp, max_x, max_y;
struct drm_clip_rect clip;
struct drm_framebuffer *cur_fb;
u8 *src_ptr, *dst_ptr;
+ struct vmw_dma_buffer *vbo = par->vmw_bo;
+ void *virtual;
- if (vmw_priv->suspended)
+ if (!READ_ONCE(par->dirty.active))
return;
mutex_lock(&par->bo_mutex);
@@ -188,10 +195,16 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
if (!cur_fb)
goto out_unlock;
+ (void) ttm_read_lock(&vmw_priv->reservation_sem, false);
+ (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
+ virtual = vmw_dma_buffer_map_and_cache(vbo);
+ if (!virtual)
+ goto out_unreserve;
+
spin_lock_irqsave(&par->dirty.lock, irq_flags);
if (!par->dirty.active) {
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
- goto out_unlock;
+ goto out_unreserve;
}
/*
@@ -221,7 +234,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
spin_unlock_irqrestore(&par->dirty.lock, irq_flags);
if (w && h) {
- dst_ptr = (u8 *)par->bo_ptr +
+ dst_ptr = (u8 *)virtual +
(dst_y1 * par->set_fb->pitches[0] + dst_x1 * cpp);
src_ptr = (u8 *)par->vmalloc +
((dst_y1 + par->fb_y) * info->fix.line_length +
@@ -237,7 +250,12 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
clip.x2 = dst_x2;
clip.y1 = dst_y1;
clip.y2 = dst_y2;
+ }
+out_unreserve:
+ ttm_bo_unreserve(&vbo->base);
+ ttm_read_unlock(&vmw_priv->reservation_sem);
+ if (w && h) {
WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
&clip, 1));
vmw_fifo_flush(vmw_priv, false);
@@ -500,22 +518,12 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par,
}
if (cur_fb) {
- drm_framebuffer_unreference(cur_fb);
+ drm_framebuffer_put(cur_fb);
par->set_fb = NULL;
}
- if (par->vmw_bo && detach_bo) {
- struct vmw_private *vmw_priv = par->vmw_priv;
-
- if (par->bo_ptr) {
- ttm_bo_kunmap(&par->map);
- par->bo_ptr = NULL;
- }
- if (unref_bo)
- vmw_dmabuf_unreference(&par->vmw_bo);
- else if (vmw_priv->active_display_unit != vmw_du_legacy)
- vmw_dmabuf_unpin(par->vmw_priv, par->vmw_bo, false);
- }
+ if (par->vmw_bo && detach_bo && unref_bo)
+ vmw_dmabuf_unreference(&par->vmw_bo);
return 0;
}
@@ -636,38 +644,6 @@ static int vmw_fb_set_par(struct fb_info *info)
if (ret)
goto out_unlock;
- if (!par->bo_ptr) {
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
-
- /*
- * Pin before mapping. Since we don't know in what placement
- * to pin, call into KMS to do it for us. LDU doesn't require
- * additional pinning because set_config() would've pinned
- * it already
- */
- if (vmw_priv->active_display_unit != vmw_du_legacy) {
- ret = vfb->pin(vfb);
- if (ret) {
- DRM_ERROR("Could not pin the fbdev "
- "framebuffer.\n");
- goto out_unlock;
- }
- }
-
- ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
- par->vmw_bo->base.num_pages, &par->map);
- if (ret) {
- if (vmw_priv->active_display_unit != vmw_du_legacy)
- vfb->unpin(vfb);
-
- DRM_ERROR("Could not map the fbdev framebuffer.\n");
- goto out_unlock;
- }
-
- par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
- }
-
-
vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
par->set_fb->width, par->set_fb->height);
@@ -883,12 +859,6 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
flush_delayed_work(&info->deferred_work);
flush_delayed_work(&par->local_work);
- mutex_lock(&par->bo_mutex);
- drm_modeset_lock_all(vmw_priv->dev);
- (void) vmw_fb_kms_detach(par, true, false);
- drm_modeset_unlock_all(vmw_priv->dev);
- mutex_unlock(&par->bo_mutex);
-
return 0;
}
@@ -904,10 +874,24 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
info = vmw_priv->fb_info;
par = info->par;
- vmw_fb_set_par(info);
spin_lock_irqsave(&par->dirty.lock, flags);
par->dirty.active = true;
spin_unlock_irqrestore(&par->dirty.lock, flags);
return 0;
}
+
+/**
+ * vmw_fb_refresh - Refresh fb display
+ *
+ * @vmw_priv: Pointer to device private
+ *
+ * Call into kms to show the fbdev display(s).
+ */
+void vmw_fb_refresh(struct vmw_private *vmw_priv)
+{
+ if (!vmw_priv->fb_info)
+ return;
+
+ vmw_fb_set_par(vmw_priv->fb_info);
+}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 6c5c75cf5e6c..9ed544f8958f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -901,11 +901,12 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
spin_lock_irq(&dev->event_lock);
if (likely(eaction->tv_sec != NULL)) {
- struct timeval tv;
+ struct timespec64 ts;
- do_gettimeofday(&tv);
- *eaction->tv_sec = tv.tv_sec;
- *eaction->tv_usec = tv.tv_usec;
+ ktime_get_ts64(&ts);
+ /* monotonic time, so no y2038 overflow */
+ *eaction->tv_sec = ts.tv_sec;
+ *eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
}
drm_send_event_locked(dev, eaction->event);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 67f844678ac8..c5e8eae0dbe2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -316,7 +316,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
out_no_surface:
ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
out_no_fb:
drm_modeset_unlock_all(dev);
out_no_copy:
@@ -393,7 +393,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_put(fb);
out_no_fb:
drm_modeset_unlock_all(dev);
out_no_copy:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 34ecc27fc30a..3628a9fe705f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -393,13 +393,13 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
du->cursor_surface = vps->surf;
du->cursor_dmabuf = vps->dmabuf;
- /* setup new image */
if (vps->surf) {
du->cursor_age = du->cursor_surface->snooper.age;
ret = vmw_cursor_update_image(dev_priv,
vps->surf->snooper.image,
- 64, 64, hotspot_x, hotspot_y);
+ 64, 64, hotspot_x,
+ hotspot_y);
} else if (vps->dmabuf) {
ret = vmw_cursor_update_dmabuf(dev_priv, vps->dmabuf,
plane->state->crtc_w,
@@ -497,11 +497,22 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
struct vmw_surface *surface = NULL;
struct drm_framebuffer *fb = new_state->fb;
+ struct drm_rect src = drm_plane_state_src(new_state);
+ struct drm_rect dest = drm_plane_state_dest(new_state);
/* Turning off */
if (!fb)
return ret;
+ ret = drm_plane_helper_check_update(plane, new_state->crtc, fb,
+ &src, &dest,
+ DRM_MODE_ROTATE_0,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true, &new_state->visible);
+ if (!ret)
+ return ret;
+
/* A lot of the code assumes this */
if (new_state->crtc_w != 64 || new_state->crtc_h != 64) {
DRM_ERROR("Invalid cursor dimensions (%d, %d)\n",
@@ -566,13 +577,9 @@ void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
crtc->state->event = NULL;
spin_lock_irq(&crtc->dev->event_lock);
- if (drm_crtc_vblank_get(crtc) == 0)
- drm_crtc_arm_vblank_event(crtc, event);
- else
- drm_crtc_send_vblank_event(crtc, event);
+ drm_crtc_send_vblank_event(crtc, event);
spin_unlock_irq(&crtc->dev->event_lock);
}
-
}
@@ -675,9 +682,6 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane)
return NULL;
vps->pinned = 0;
-
- /* Mapping is managed by prepare_fb/cleanup_fb */
- memset(&vps->host_map, 0, sizeof(vps->host_map));
vps->cpp = 0;
/* Each ref counted resource needs to be acquired again */
@@ -739,11 +743,6 @@ vmw_du_plane_destroy_state(struct drm_plane *plane,
/* Should have been freed by cleanup_fb */
- if (vps->host_map.virtual) {
- DRM_ERROR("Host mapping not freed\n");
- ttm_bo_kunmap(&vps->host_map);
- }
-
if (vps->surf)
vmw_surface_unreference(&vps->surf);
@@ -888,11 +887,11 @@ static int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
if (dev_priv->active_display_unit == vmw_du_screen_object)
ret = vmw_kms_sou_do_surface_dirty(dev_priv, &vfbs->base,
clips, NULL, NULL, 0, 0,
- num_clips, inc, NULL);
+ num_clips, inc, NULL, NULL);
else
ret = vmw_kms_stdu_surface_dirty(dev_priv, &vfbs->base,
clips, NULL, NULL, 0, 0,
- num_clips, inc, NULL);
+ num_clips, inc, NULL, NULL);
vmw_fifo_flush(dev_priv, false);
ttm_read_unlock(&dev_priv->reservation_sem);
@@ -928,11 +927,12 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
switch (dev_priv->active_display_unit) {
case vmw_du_screen_object:
return vmw_kms_sou_readback(dev_priv, file_priv, vfb,
- user_fence_rep, vclips, num_clips);
+ user_fence_rep, vclips, num_clips,
+ NULL);
case vmw_du_screen_target:
return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
user_fence_rep, NULL, vclips, num_clips,
- 1, false, true);
+ 1, false, true, NULL);
default:
WARN_ONCE(true,
"Readback called with invalid display system.\n");
@@ -1090,12 +1090,12 @@ static int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
case vmw_du_screen_target:
ret = vmw_kms_stdu_dma(dev_priv, NULL, &vfbd->base, NULL,
clips, NULL, num_clips, increment,
- true, true);
+ true, true, NULL);
break;
case vmw_du_screen_object:
ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, &vfbd->base,
clips, NULL, num_clips,
- increment, true, NULL);
+ increment, true, NULL, NULL);
break;
case vmw_du_legacy:
ret = vmw_kms_ldu_do_dmabuf_dirty(dev_priv, &vfbd->base, 0, 0,
@@ -1121,12 +1121,14 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
};
/**
- * Pin the dmabuffer to the start of vram.
+ * Pin the dmabuffer in a location suitable for access by the
+ * display system.
*/
static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
{
struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
struct vmw_dma_buffer *buf;
+ struct ttm_placement *placement;
int ret;
buf = vfb->dmabuf ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
@@ -1143,12 +1145,24 @@ static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
break;
case vmw_du_screen_object:
case vmw_du_screen_target:
- if (vfb->dmabuf)
- return vmw_dmabuf_pin_in_vram_or_gmr(dev_priv, buf,
- false);
+ if (vfb->dmabuf) {
+ if (dev_priv->capabilities & SVGA_CAP_3D) {
+ /*
+ * Use surface DMA to get content to
+				 * screen target surface.
+ */
+ placement = &vmw_vram_gmr_placement;
+ } else {
+ /* Use CPU blit. */
+ placement = &vmw_sys_placement;
+ }
+ } else {
+ /* Use surface / image update */
+ placement = &vmw_mob_placement;
+ }
- return vmw_dmabuf_pin_in_placement(dev_priv, buf,
- &vmw_mob_placement, false);
+ return vmw_dmabuf_pin_in_placement(dev_priv, buf, placement,
+ false);
default:
return -EINVAL;
}
@@ -1539,35 +1553,10 @@ vmw_kms_atomic_check_modeset(struct drm_device *dev,
return drm_atomic_helper_check(dev, state);
}
-
-/**
- * vmw_kms_atomic_commit - Perform an atomic state commit
- *
- * @dev: DRM device
- * @state: the driver state object
- * @nonblock: Whether nonblocking behaviour is requested
- *
- * This is a simple wrapper around drm_atomic_helper_commit() for
- * us to clear the nonblocking value.
- *
- * Nonblocking commits currently cause synchronization issues
- * for vmwgfx.
- *
- * RETURNS
- * Zero for success or negative error code on failure.
- */
-int vmw_kms_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
-{
- return drm_atomic_helper_commit(dev, state, false);
-}
-
-
static const struct drm_mode_config_funcs vmw_kms_funcs = {
.fb_create = vmw_kms_fb_create,
.atomic_check = vmw_kms_atomic_check_modeset,
- .atomic_commit = vmw_kms_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static int vmw_kms_generic_present(struct vmw_private *dev_priv,
@@ -1581,7 +1570,7 @@ static int vmw_kms_generic_present(struct vmw_private *dev_priv,
{
return vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL, clips,
&surface->res, destX, destY,
- num_clips, 1, NULL);
+ num_clips, 1, NULL, NULL);
}
@@ -1600,7 +1589,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
case vmw_du_screen_target:
ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, clips,
&surface->res, destX, destY,
- num_clips, 1, NULL);
+ num_clips, 1, NULL, NULL);
break;
case vmw_du_screen_object:
ret = vmw_kms_generic_present(dev_priv, file_priv, vfb, surface,
@@ -2328,10 +2317,16 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
dirty->dev_priv = dev_priv;
- list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
- if (crtc->primary->fb != &framebuffer->base)
- continue;
- units[num_units++] = vmw_crtc_to_du(crtc);
+ /* If crtc is passed, no need to iterate over other display units */
+ if (dirty->crtc) {
+ units[num_units++] = vmw_crtc_to_du(dirty->crtc);
+ } else {
+ list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
+ head) {
+ if (crtc->primary->fb != &framebuffer->base)
+ continue;
+ units[num_units++] = vmw_crtc_to_du(crtc);
+ }
}
for (k = 0; k < num_units; k++) {
@@ -2430,14 +2425,21 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible,
- bool validate_as_mob)
+ bool validate_as_mob,
+ bool for_cpu_blit)
{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = interruptible,
+ .no_wait_gpu = false};
struct ttm_buffer_object *bo = &buf->base;
int ret;
ttm_bo_reserve(bo, false, false, NULL);
- ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
- validate_as_mob);
+ if (for_cpu_blit)
+ ret = ttm_bo_validate(bo, &vmw_nonfixed_placement, &ctx);
+ else
+ ret = vmw_validate_single_buffer(dev_priv, bo, interruptible,
+ validate_as_mob);
if (ret)
ttm_bo_unreserve(bo);
@@ -2549,7 +2551,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
if (res->backup) {
ret = vmw_kms_helper_buffer_prepare(res->dev_priv, res->backup,
interruptible,
- res->dev_priv->has_mob);
+ res->dev_priv->has_mob,
+ false);
if (ret)
goto out_unreserve;
}
@@ -2845,3 +2848,51 @@ int vmw_kms_set_config(struct drm_mode_set *set,
return drm_atomic_helper_set_config(set, ctx);
}
+
+
+/**
+ * vmw_kms_suspend - Save modesetting state and turn modesetting off.
+ *
+ * @dev: Pointer to the drm device
+ * Return: 0 on success. Negative error code on failure.
+ */
+int vmw_kms_suspend(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+
+ dev_priv->suspend_state = drm_atomic_helper_suspend(dev);
+ if (IS_ERR(dev_priv->suspend_state)) {
+ int ret = PTR_ERR(dev_priv->suspend_state);
+
+ DRM_ERROR("Failed kms suspend: %d\n", ret);
+ dev_priv->suspend_state = NULL;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+
+/**
+ * vmw_kms_resume - Re-enable modesetting and restore state
+ *
+ * @dev: Pointer to the drm device
+ * Return: 0 on success. Negative error code on failure.
+ *
+ * State is resumed from a previous vmw_kms_suspend(). It's illegal
+ * to call this function without a previous vmw_kms_suspend().
+ */
+int vmw_kms_resume(struct drm_device *dev)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ int ret;
+
+ if (WARN_ON(!dev_priv->suspend_state))
+ return 0;
+
+ ret = drm_atomic_helper_resume(dev, dev_priv->suspend_state);
+ dev_priv->suspend_state = NULL;
+
+ return ret;
+}
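
vmw_kms_suspend() and vmw_kms_resume() above are thin wrappers around the atomic helpers that snapshot and later replay the full modesetting state. A minimal sketch of how such wrappers could be hooked into a driver's PM path follows; the function names are illustrative, not the actual vmwgfx callbacks.

#include <linux/err.h>
#include <drm/drm_device.h>
#include <drm/drm_atomic_helper.h>

/* Illustrative freeze/restore pair; error handling trimmed for brevity. */
static int example_display_freeze(struct drm_device *dev,
				  struct drm_atomic_state **saved)
{
	/* Disables all outputs and returns the captured state (or ERR_PTR). */
	*saved = drm_atomic_helper_suspend(dev);
	return PTR_ERR_OR_ZERO(*saved);
}

static int example_display_restore(struct drm_device *dev,
				   struct drm_atomic_state *saved)
{
	/* Re-commits the captured state and releases it. */
	return drm_atomic_helper_resume(dev, saved);
}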
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index cd9da2dd79af..4e8749a8717e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -50,6 +50,7 @@
* @unit: The current display unit. Set up by the helper before a call to @clip.
* @cmd: The allocated fifo space. Set up by the helper before the first @clip
* call.
+ * @crtc: The crtc for which to build dirty commands.
* @num_hits: Number of clip rect commands for this display unit.
* Cleared by the helper before the first @clip call. Updated by the @clip
* callback.
@@ -71,6 +72,7 @@ struct vmw_kms_dirty {
struct vmw_private *dev_priv;
struct vmw_display_unit *unit;
void *cmd;
+ struct drm_crtc *crtc;
u32 num_hits;
s32 fb_x;
s32 fb_y;
@@ -175,7 +177,6 @@ struct vmw_plane_state {
int pinned;
/* For CPU Blit */
- struct ttm_bo_kmap_obj host_map;
unsigned int cpp;
};
@@ -287,7 +288,8 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
int vmw_kms_helper_buffer_prepare(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buf,
bool interruptible,
- bool validate_as_mob);
+ bool validate_as_mob,
+ bool for_cpu_blit);
void vmw_kms_helper_buffer_revert(struct vmw_dma_buffer *buf);
void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
struct drm_file *file_priv,
@@ -398,20 +400,23 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
s32 dest_x,
s32 dest_y,
unsigned num_clips, int inc,
- struct vmw_fence_obj **out_fence);
+ struct vmw_fence_obj **out_fence,
+ struct drm_crtc *crtc);
int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer,
struct drm_clip_rect *clips,
struct drm_vmw_rect *vclips,
unsigned num_clips, int increment,
bool interruptible,
- struct vmw_fence_obj **out_fence);
+ struct vmw_fence_obj **out_fence,
+ struct drm_crtc *crtc);
int vmw_kms_sou_readback(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_vmw_rect *vclips,
- uint32_t num_clips);
+ uint32_t num_clips,
+ struct drm_crtc *crtc);
/*
* Screen Target Display Unit functions - vmwgfx_stdu.c
@@ -425,7 +430,8 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
s32 dest_x,
s32 dest_y,
unsigned num_clips, int inc,
- struct vmw_fence_obj **out_fence);
+ struct vmw_fence_obj **out_fence,
+ struct drm_crtc *crtc);
int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
@@ -435,7 +441,8 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
uint32_t num_clips,
int increment,
bool to_surface,
- bool interruptible);
+ bool interruptible,
+ struct drm_crtc *crtc);
int vmw_kms_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 97000996b8dc..cdff99211602 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -328,7 +328,7 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
{
struct rpc_channel channel;
char *msg, *reply = NULL;
- size_t msg_len, reply_len = 0;
+ size_t reply_len = 0;
int ret = 0;
@@ -338,15 +338,12 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
if (!guest_info_param || !length)
return -EINVAL;
- msg_len = strlen(guest_info_param) + strlen("info-get ") + 1;
- msg = kzalloc(msg_len, GFP_KERNEL);
+ msg = kasprintf(GFP_KERNEL, "info-get %s", guest_info_param);
if (!msg) {
DRM_ERROR("Cannot allocate memory to get %s", guest_info_param);
return -ENOMEM;
}
- sprintf(msg, "info-get %s", guest_info_param);
-
if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
vmw_send_msg(&channel, msg) ||
vmw_recv_msg(&channel, (void *) &reply, &reply_len) ||
@@ -388,7 +385,6 @@ int vmw_host_log(const char *log)
{
struct rpc_channel channel;
char *msg;
- int msg_len;
int ret = 0;
@@ -398,15 +394,12 @@ int vmw_host_log(const char *log)
if (!log)
return ret;
- msg_len = strlen(log) + strlen("log ") + 1;
- msg = kzalloc(msg_len, GFP_KERNEL);
+ msg = kasprintf(GFP_KERNEL, "log %s", log);
if (!msg) {
DRM_ERROR("Cannot allocate memory for log message\n");
return -ENOMEM;
}
- sprintf(msg, "log %s", log);
-
if (vmw_open_channel(&channel, RPCI_PROTOCOL_NUM) ||
vmw_send_msg(&channel, msg) ||
vmw_close_channel(&channel)) {
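
Both hunks in this file replace an explicit strlen()+kzalloc()+sprintf() sequence with kasprintf(), which sizes, allocates and formats the buffer in one call. A standalone sketch of the pattern, with a made-up command prefix purely for illustration:

#include <linux/kernel.h>
#include <linux/slab.h>

/* Build "example-cmd <payload>" in a single allocation; the caller owns
 * the returned buffer and must kfree() it, just like the msg buffers above.
 * A NULL return still means -ENOMEM, so the error handling is unchanged.
 */
static char *example_build_msg(const char *payload)
{
	return kasprintf(GFP_KERNEL, "example-cmd %s", payload);
}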
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 9e101450cc4d..6b3a942b18df 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -354,6 +354,7 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
+ vmw_dma_buffer_unmap(vmw_bo);
kfree(vmw_bo);
}
@@ -361,6 +362,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+ vmw_dma_buffer_unmap(&vmw_user_bo->dma);
ttm_prime_object_kfree(vmw_user_bo, prime);
}
@@ -1239,6 +1241,12 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+ /*
+ * Kill any cached kernel maps before move. An optimization could
+ * be to do this iff source or destination memory type is VRAM.
+ */
+ vmw_dma_buffer_unmap(dma_buf);
+
if (mem->mem_type != VMW_PL_MOB) {
struct vmw_resource *res, *n;
struct ttm_validate_buffer val_buf;
@@ -1262,6 +1270,21 @@ void vmw_resource_move_notify(struct ttm_buffer_object *bo,
}
+/**
+ * vmw_resource_swap_notify - swapout notify callback.
+ *
+ * @bo: The buffer object to be swapped out.
+ */
+void vmw_resource_swap_notify(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy != vmw_dmabuf_bo_free &&
+ bo->destroy != vmw_user_dmabuf_destroy)
+ return;
+
+ /* Kill any cached kernel maps before swapout */
+ vmw_dma_buffer_unmap(vmw_dma_buffer(bo));
+}
+
/**
* vmw_query_readback_all - Read back cached query states
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 63a4cd794b73..419185f60278 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -316,69 +316,21 @@ static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
struct drm_modeset_acquire_ctx *ctx)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
- struct drm_framebuffer *old_fb = crtc->primary->fb;
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
- struct vmw_fence_obj *fence = NULL;
- struct drm_vmw_rect vclips;
int ret;
if (!vmw_kms_crtc_flippable(dev_priv, crtc))
return -EINVAL;
- flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
- ret = drm_atomic_helper_page_flip(crtc, new_fb, NULL, flags, ctx);
+ ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
if (ret) {
DRM_ERROR("Page flip error %d.\n", ret);
return ret;
}
- /* do a full screen dirty update */
- vclips.x = crtc->x;
- vclips.y = crtc->y;
- vclips.w = crtc->mode.hdisplay;
- vclips.h = crtc->mode.vdisplay;
-
- if (vfb->dmabuf)
- ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb,
- NULL, &vclips, 1, 1,
- true, &fence);
- else
- ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb,
- NULL, &vclips, NULL,
- 0, 0, 1, 1, &fence);
-
-
- if (ret != 0)
- goto out_no_fence;
- if (!fence) {
- ret = -EINVAL;
- goto out_no_fence;
- }
-
- if (event) {
- struct drm_file *file_priv = event->base.file_priv;
-
- ret = vmw_event_fence_action_queue(file_priv, fence,
- &event->base,
- &event->event.vbl.tv_sec,
- &event->event.vbl.tv_usec,
- true);
- }
-
- /*
- * No need to hold on to this now. The only cleanup
- * we need to do if we fail is unref the fence.
- */
- vmw_fence_obj_unreference(&fence);
-
if (vmw_crtc_to_du(crtc)->is_implicit)
vmw_kms_update_implicit_fb(dev_priv, crtc);
return ret;
-
-out_no_fence:
- drm_atomic_set_fb_for_plane(crtc->primary->state, old_fb);
- return ret;
}
static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
@@ -453,7 +405,11 @@ vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
+ struct drm_crtc *crtc = plane->state->crtc ?
+ plane->state->crtc : old_state->crtc;
+ if (vps->dmabuf)
+ vmw_dmabuf_unpin(vmw_priv(crtc->dev), vps->dmabuf, false);
vmw_dmabuf_unreference(&vps->dmabuf);
vps->dmabuf_size = 0;
@@ -491,10 +447,17 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
}
size = new_state->crtc_w * new_state->crtc_h * 4;
+ dev_priv = vmw_priv(crtc->dev);
if (vps->dmabuf) {
- if (vps->dmabuf_size == size)
- return 0;
+ if (vps->dmabuf_size == size) {
+ /*
+ * Note that this might temporarily up the pin-count
+ * to 2, until cleanup_fb() is called.
+ */
+ return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf,
+ true);
+ }
vmw_dmabuf_unreference(&vps->dmabuf);
vps->dmabuf_size = 0;
@@ -504,7 +467,6 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
if (!vps->dmabuf)
return -ENOMEM;
- dev_priv = vmw_priv(crtc->dev);
vmw_svga_enable(dev_priv);
/* After we have alloced the backing store might not be able to
@@ -515,13 +477,16 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
&vmw_vram_ne_placement,
false, &vmw_dmabuf_bo_free);
vmw_overlay_resume_all(dev_priv);
-
- if (ret != 0)
+ if (ret) {
vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
- else
- vps->dmabuf_size = size;
+ return ret;
+ }
- return ret;
+ /*
+ * TTM already thinks the buffer is pinned, but make sure the
+ * pin_count is upped.
+ */
+ return vmw_dmabuf_pin_in_vram(dev_priv, vps->dmabuf, true);
}
@@ -530,9 +495,71 @@ vmw_sou_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_crtc *crtc = plane->state->crtc;
+ struct drm_pending_vblank_event *event = NULL;
+ struct vmw_fence_obj *fence = NULL;
+ int ret;
+
+ if (crtc && plane->state->fb) {
+ struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+ struct vmw_framebuffer *vfb =
+ vmw_framebuffer_to_vfb(plane->state->fb);
+ struct drm_vmw_rect vclips;
+
+ vclips.x = crtc->x;
+ vclips.y = crtc->y;
+ vclips.w = crtc->mode.hdisplay;
+ vclips.h = crtc->mode.vdisplay;
+
+ if (vfb->dmabuf)
+ ret = vmw_kms_sou_do_dmabuf_dirty(dev_priv, vfb, NULL,
+ &vclips, 1, 1, true,
+ &fence, crtc);
+ else
+ ret = vmw_kms_sou_do_surface_dirty(dev_priv, vfb, NULL,
+ &vclips, NULL, 0, 0,
+ 1, 1, &fence, crtc);
+
+ /*
+ * We cannot really fail this function, so if we do, then output
+ * an error and maintain consistent atomic state.
+ */
+ if (ret != 0)
+ DRM_ERROR("Failed to update screen.\n");
- if (crtc)
crtc->primary->fb = plane->state->fb;
+ } else {
+ /*
+ * When disabling a plane, CRTC and FB should always be NULL
+ * together, otherwise it's an error.
+		 * Here the primary plane is being disabled, so we should really
+		 * blank the screen object display unit, if not already done.
+ */
+ return;
+ }
+
+ event = crtc->state->event;
+ /*
+ * In case of failure and other cases, vblank event will be sent in
+	 * In case of failure, or when the event is not queued here, the
+	 * vblank event will be sent from vmw_du_crtc_atomic_flush.
+ if (event && fence) {
+ struct drm_file *file_priv = event->base.file_priv;
+
+ ret = vmw_event_fence_action_queue(file_priv,
+ fence,
+ &event->base,
+ &event->event.vbl.tv_sec,
+ &event->event.vbl.tv_usec,
+ true);
+
+ if (unlikely(ret != 0))
+ DRM_ERROR("Failed to queue event on fence.\n");
+ else
+ crtc->state->event = NULL;
+ }
+
+ if (fence)
+ vmw_fence_obj_unreference(&fence);
}
@@ -892,6 +919,7 @@ static void vmw_sou_surface_clip(struct vmw_kms_dirty *dirty)
* @out_fence: If non-NULL, will return a ref-counted pointer to a
* struct vmw_fence_obj. The returned fence pointer may be NULL in which
* case the device has already synchronized.
+ * @crtc: If crtc is passed, perform surface dirty on that crtc only.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
@@ -904,7 +932,8 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
s32 dest_x,
s32 dest_y,
unsigned num_clips, int inc,
- struct vmw_fence_obj **out_fence)
+ struct vmw_fence_obj **out_fence,
+ struct drm_crtc *crtc)
{
struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base);
@@ -923,6 +952,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
sdirty.base.dev_priv = dev_priv;
sdirty.base.fifo_reserve_size = sizeof(struct vmw_kms_sou_dirty_cmd) +
sizeof(SVGASignedRect) * num_clips;
+ sdirty.base.crtc = crtc;
sdirty.sid = srf->id;
sdirty.left = sdirty.top = S32_MAX;
@@ -994,6 +1024,7 @@ static void vmw_sou_dmabuf_clip(struct vmw_kms_dirty *dirty)
* @out_fence: If non-NULL, will return a ref-counted pointer to a
* struct vmw_fence_obj. The returned fence pointer may be NULL in which
* case the device has already synchronized.
+ * @crtc: If crtc is passed, perform dmabuf dirty on that crtc only.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
@@ -1004,7 +1035,8 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
struct drm_vmw_rect *vclips,
unsigned num_clips, int increment,
bool interruptible,
- struct vmw_fence_obj **out_fence)
+ struct vmw_fence_obj **out_fence,
+ struct drm_crtc *crtc)
{
struct vmw_dma_buffer *buf =
container_of(framebuffer, struct vmw_framebuffer_dmabuf,
@@ -1013,7 +1045,7 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
int ret;
ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
- false);
+ false, false);
if (ret)
return ret;
@@ -1021,6 +1053,7 @@ int vmw_kms_sou_do_dmabuf_dirty(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto out_revert;
+ dirty.crtc = crtc;
dirty.fifo_commit = vmw_sou_dmabuf_fifo_commit;
dirty.clip = vmw_sou_dmabuf_clip;
dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_dmabuf_blit) *
@@ -1092,6 +1125,7 @@ static void vmw_sou_readback_clip(struct vmw_kms_dirty *dirty)
* Must be set to non-NULL if @file_priv is non-NULL.
* @vclips: Array of clip rects.
* @num_clips: Number of clip rects in @vclips.
+ * @crtc: If crtc is passed, readback on that crtc only.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
@@ -1101,14 +1135,16 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
struct vmw_framebuffer *vfb,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct drm_vmw_rect *vclips,
- uint32_t num_clips)
+ uint32_t num_clips,
+ struct drm_crtc *crtc)
{
struct vmw_dma_buffer *buf =
container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
struct vmw_kms_dirty dirty;
int ret;
- ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false);
+ ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, true, false,
+ false);
if (ret)
return ret;
@@ -1116,6 +1152,7 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto out_revert;
+ dirty.crtc = crtc;
dirty.fifo_commit = vmw_sou_readback_fifo_commit;
dirty.clip = vmw_sou_readback_clip;
dirty.fifo_reserve_size = sizeof(struct vmw_kms_sou_readback_blit) *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index b68d74888ab1..8eec88920851 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -114,7 +114,6 @@ struct vmw_screen_target_display_unit {
bool defined;
/* For CPU Blit */
- struct ttm_bo_kmap_obj host_map;
unsigned int cpp;
};
@@ -492,71 +491,17 @@ static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_screen_target_display_unit *stdu = vmw_crtc_to_stdu(crtc);
- struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
- struct drm_vmw_rect vclips;
int ret;
- dev_priv = vmw_priv(crtc->dev);
- stdu = vmw_crtc_to_stdu(crtc);
-
if (!stdu->defined || !vmw_kms_crtc_flippable(dev_priv, crtc))
return -EINVAL;
- /*
- * We're always async, but the helper doesn't know how to set async
- * so lie to the helper. Also, the helper expects someone
- * to pick the event up from the crtc state, and if nobody does,
- * it will free it. Since we handle the event in this function,
- * don't hand it to the helper.
- */
- flags &= ~DRM_MODE_PAGE_FLIP_ASYNC;
- ret = drm_atomic_helper_page_flip(crtc, new_fb, NULL, flags, ctx);
+ ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
if (ret) {
DRM_ERROR("Page flip error %d.\n", ret);
return ret;
}
- if (stdu->base.is_implicit)
- vmw_kms_update_implicit_fb(dev_priv, crtc);
-
- /*
- * Now that we've bound a new surface to the screen target,
- * update the contents.
- */
- vclips.x = crtc->x;
- vclips.y = crtc->y;
- vclips.w = crtc->mode.hdisplay;
- vclips.h = crtc->mode.vdisplay;
-
- if (vfb->dmabuf)
- ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL, &vclips,
- 1, 1, true, false);
- else
- ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, &vclips,
- NULL, 0, 0, 1, 1, NULL);
- if (ret) {
- DRM_ERROR("Page flip update error %d.\n", ret);
- return ret;
- }
-
- if (event) {
- struct vmw_fence_obj *fence = NULL;
- struct drm_file *file_priv = event->base.file_priv;
-
- vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- if (!fence)
- return -ENOMEM;
-
- ret = vmw_event_fence_action_queue(file_priv, fence,
- &event->base,
- &event->event.vbl.tv_sec,
- &event->event.vbl.tv_usec,
- true);
- vmw_fence_obj_unreference(&fence);
- } else {
- (void) vmw_fifo_flush(dev_priv, false);
- }
-
return 0;
}
@@ -693,10 +638,9 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
container_of(dirty->unit, typeof(*stdu), base);
s32 width, height;
s32 src_pitch, dst_pitch;
- u8 *src, *dst;
- bool not_used;
- struct ttm_bo_kmap_obj guest_map;
- int ret;
+ struct ttm_buffer_object *src_bo, *dst_bo;
+ u32 src_offset, dst_offset;
+ struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(stdu->cpp);
if (!dirty->num_hits)
return;
@@ -707,57 +651,38 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
if (width == 0 || height == 0)
return;
- ret = ttm_bo_kmap(&ddirty->buf->base, 0, ddirty->buf->base.num_pages,
- &guest_map);
- if (ret) {
- DRM_ERROR("Failed mapping framebuffer for blit: %d\n",
- ret);
- goto out_cleanup;
- }
-
- /* Assume we are blitting from Host (display_srf) to Guest (dmabuf) */
- src_pitch = stdu->display_srf->base_size.width * stdu->cpp;
- src = ttm_kmap_obj_virtual(&stdu->host_map, &not_used);
- src += ddirty->top * src_pitch + ddirty->left * stdu->cpp;
-
- dst_pitch = ddirty->pitch;
- dst = ttm_kmap_obj_virtual(&guest_map, &not_used);
- dst += ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
-
-
- /* Figure out the real direction */
- if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
- u8 *tmp;
- s32 tmp_pitch;
-
- tmp = src;
- tmp_pitch = src_pitch;
+ /* Assume we are blitting from Guest (dmabuf) to Host (display_srf) */
+ dst_pitch = stdu->display_srf->base_size.width * stdu->cpp;
+ dst_bo = &stdu->display_srf->res.backup->base;
+ dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
- src = dst;
- src_pitch = dst_pitch;
+ src_pitch = ddirty->pitch;
+ src_bo = &ddirty->buf->base;
+ src_offset = ddirty->fb_top * src_pitch + ddirty->fb_left * stdu->cpp;
- dst = tmp;
- dst_pitch = tmp_pitch;
+ /* Swap src and dst if the assumption was wrong. */
+ if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM) {
+ swap(dst_pitch, src_pitch);
+ swap(dst_bo, src_bo);
+ swap(src_offset, dst_offset);
}
- /* CPU Blit */
- while (height-- > 0) {
- memcpy(dst, src, width * stdu->cpp);
- dst += dst_pitch;
- src += src_pitch;
- }
+ (void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
+ src_bo, src_offset, src_pitch,
+ width * stdu->cpp, height, &diff);
- if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
+ if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM &&
+ drm_rect_visible(&diff.rect)) {
struct vmw_private *dev_priv;
struct vmw_stdu_update *cmd;
struct drm_clip_rect region;
int ret;
/* We are updating the actual surface, not a proxy */
- region.x1 = ddirty->left;
- region.x2 = ddirty->right;
- region.y1 = ddirty->top;
- region.y2 = ddirty->bottom;
+ region.x1 = diff.rect.x1;
+ region.x2 = diff.rect.x2;
+ region.y1 = diff.rect.y1;
+ region.y2 = diff.rect.y2;
ret = vmw_kms_update_proxy(
(struct vmw_resource *) &stdu->display_srf->res,
(const struct drm_clip_rect *) &region, 1, 1);
@@ -774,13 +699,12 @@ static void vmw_stdu_dmabuf_cpu_commit(struct vmw_kms_dirty *dirty)
}
vmw_stdu_populate_update(cmd, stdu->base.unit,
- ddirty->left, ddirty->right,
- ddirty->top, ddirty->bottom);
+ region.x1, region.x2,
+ region.y1, region.y2);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
- ttm_bo_kunmap(&guest_map);
out_cleanup:
ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
ddirty->right = ddirty->bottom = S32_MIN;
@@ -802,6 +726,7 @@ out_cleanup:
* @to_surface: Whether to DMA to the screen target system as opposed to
* from the screen target system.
* @interruptible: Whether to perform waits interruptible if possible.
+ * @crtc: If crtc is passed, perform stdu dma on that crtc only.
*
 * If DMA-ing to the screen target system, the function will also notify
* the screen target system that a bounding box of the cliprects has been
@@ -818,15 +743,22 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
uint32_t num_clips,
int increment,
bool to_surface,
- bool interruptible)
+ bool interruptible,
+ struct drm_crtc *crtc)
{
struct vmw_dma_buffer *buf =
container_of(vfb, struct vmw_framebuffer_dmabuf, base)->buffer;
struct vmw_stdu_dirty ddirty;
int ret;
+ bool cpu_blit = !(dev_priv->capabilities & SVGA_CAP_3D);
+ /*
+	 * VMs without 3D support don't have the surface DMA command, so
+	 * we'll be using a CPU blit; the framebuffer should therefore be
+	 * moved out of VRAM.
+ */
ret = vmw_kms_helper_buffer_prepare(dev_priv, buf, interruptible,
- false);
+ false, cpu_blit);
if (ret)
return ret;
@@ -845,13 +777,15 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
if (to_surface)
ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update);
- /* 2D VMs cannot use SVGA_3D_CMD_SURFACE_DMA so do CPU blit instead */
- if (!(dev_priv->capabilities & SVGA_CAP_3D)) {
+
+ if (cpu_blit) {
ddirty.base.fifo_commit = vmw_stdu_dmabuf_cpu_commit;
ddirty.base.clip = vmw_stdu_dmabuf_cpu_clip;
ddirty.base.fifo_reserve_size = 0;
}
+ ddirty.base.crtc = crtc;
+
ret = vmw_kms_helper_dirty(dev_priv, vfb, clips, vclips,
0, 0, num_clips, increment, &ddirty.base);
vmw_kms_helper_buffer_finish(dev_priv, file_priv, buf, NULL,
@@ -963,6 +897,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
* @out_fence: If non-NULL, will return a ref-counted pointer to a
* struct vmw_fence_obj. The returned fence pointer may be NULL in which
* case the device has already synchronized.
+ * @crtc: If crtc is passed, perform surface dirty on that crtc only.
*
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
@@ -975,7 +910,8 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
s32 dest_x,
s32 dest_y,
unsigned num_clips, int inc,
- struct vmw_fence_obj **out_fence)
+ struct vmw_fence_obj **out_fence,
+ struct drm_crtc *crtc)
{
struct vmw_framebuffer_surface *vfbs =
container_of(framebuffer, typeof(*vfbs), base);
@@ -1000,6 +936,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
sdirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_surface_copy) +
sizeof(SVGA3dCopyBox) * num_clips +
sizeof(struct vmw_stdu_update);
+ sdirty.base.crtc = crtc;
sdirty.sid = srf->id;
sdirty.left = sdirty.top = S32_MAX;
sdirty.right = sdirty.bottom = S32_MIN;
@@ -1118,9 +1055,6 @@ vmw_stdu_primary_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- if (vps->host_map.virtual)
- ttm_bo_kunmap(&vps->host_map);
-
if (vps->surf)
WARN_ON(!vps->pinned);
@@ -1282,24 +1216,11 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
* so cache these mappings
*/
if (vps->content_fb_type == SEPARATE_DMA &&
- !(dev_priv->capabilities & SVGA_CAP_3D)) {
- ret = ttm_bo_kmap(&vps->surf->res.backup->base, 0,
- vps->surf->res.backup->base.num_pages,
- &vps->host_map);
- if (ret) {
- DRM_ERROR("Failed to map display buffer to CPU\n");
- goto out_srf_unpin;
- }
-
+ !(dev_priv->capabilities & SVGA_CAP_3D))
vps->cpp = new_fb->pitches[0] / new_fb->width;
- }
return 0;
-out_srf_unpin:
- vmw_resource_unpin(&vps->surf->res);
- vps->pinned--;
-
out_srf_unref:
vmw_surface_unreference(&vps->surf);
return ret;
@@ -1322,41 +1243,104 @@ static void
vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct vmw_private *dev_priv;
- struct vmw_screen_target_display_unit *stdu;
struct vmw_plane_state *vps = vmw_plane_state_to_vps(plane->state);
- struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
+ struct drm_crtc *crtc = plane->state->crtc;
+ struct vmw_screen_target_display_unit *stdu;
+ struct drm_pending_vblank_event *event;
+ struct vmw_private *dev_priv;
int ret;
- stdu = vmw_crtc_to_stdu(crtc);
- dev_priv = vmw_priv(crtc->dev);
+ /*
+ * We cannot really fail this function, so if we do, then output an
+ * error and maintain consistent atomic state.
+ */
+ if (crtc && plane->state->fb) {
+ struct vmw_framebuffer *vfb =
+ vmw_framebuffer_to_vfb(plane->state->fb);
+ struct drm_vmw_rect vclips;
+ stdu = vmw_crtc_to_stdu(crtc);
+ dev_priv = vmw_priv(crtc->dev);
+
+ stdu->display_srf = vps->surf;
+ stdu->content_fb_type = vps->content_fb_type;
+ stdu->cpp = vps->cpp;
+
+ vclips.x = crtc->x;
+ vclips.y = crtc->y;
+ vclips.w = crtc->mode.hdisplay;
+ vclips.h = crtc->mode.vdisplay;
+
+ ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
+ if (ret)
+ DRM_ERROR("Failed to bind surface to STDU.\n");
+
+ if (vfb->dmabuf)
+ ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
+ &vclips, 1, 1, true, false,
+ crtc);
+ else
+ ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL,
+ &vclips, NULL, 0, 0,
+ 1, 1, NULL, crtc);
+ if (ret)
+ DRM_ERROR("Failed to update STDU.\n");
- stdu->display_srf = vps->surf;
- stdu->content_fb_type = vps->content_fb_type;
- stdu->cpp = vps->cpp;
- memcpy(&stdu->host_map, &vps->host_map, sizeof(vps->host_map));
+ crtc->primary->fb = plane->state->fb;
+ } else {
+ crtc = old_state->crtc;
+ stdu = vmw_crtc_to_stdu(crtc);
+ dev_priv = vmw_priv(crtc->dev);
- if (!stdu->defined)
- return;
+ /*
+ * When disabling a plane, CRTC and FB should always be NULL
+ * together, otherwise it's an error.
+		 * Here the primary plane is being disabled, so blank the
+		 * screen target display unit, if not already done.
+ */
+ if (!stdu->defined)
+ return;
- if (plane->state->fb)
- ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res);
- else
ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
+ if (ret)
+ DRM_ERROR("Failed to blank STDU\n");
+
+ ret = vmw_stdu_update_st(dev_priv, stdu);
+ if (ret)
+ DRM_ERROR("Failed to update STDU.\n");
+
+ return;
+ }
+ event = crtc->state->event;
/*
- * We cannot really fail this function, so if we do, then output an
- * error and quit
+	 * In case of failure, or when no event is queued here, the vblank
+	 * event will be sent from vmw_du_crtc_atomic_flush.
*/
- if (ret)
- DRM_ERROR("Failed to bind surface to STDU.\n");
- else
- crtc->primary->fb = plane->state->fb;
+ if (event && (ret == 0)) {
+ struct vmw_fence_obj *fence = NULL;
+ struct drm_file *file_priv = event->base.file_priv;
- ret = vmw_stdu_update_st(dev_priv, stdu);
+ vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- if (ret)
- DRM_ERROR("Failed to update STDU.\n");
+ /*
+		 * If fence is NULL, the device has already synchronized.
+ */
+ if (fence) {
+ ret = vmw_event_fence_action_queue(
+ file_priv, fence, &event->base,
+ &event->event.vbl.tv_sec,
+ &event->event.vbl.tv_usec,
+ true);
+ if (ret)
+ DRM_ERROR("Failed to queue event on fence.\n");
+ else
+ crtc->state->event = NULL;
+
+ vmw_fence_obj_unreference(&fence);
+ }
+ } else {
+ (void) vmw_fifo_flush(dev_priv, false);
+ }
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index db1bb166845e..b236c48bf265 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -345,7 +345,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
dev_priv->used_memory_size -= res->backup_size;
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
- vmw_fifo_resource_dec(dev_priv);
}
/**
@@ -407,6 +406,8 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
vmw_surface_define_encode(srf, cmd);
vmw_fifo_commit(dev_priv, submit_size);
+ vmw_fifo_resource_inc(dev_priv);
+
/*
* Surface memory usage accounting.
*/
@@ -558,6 +559,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
*/
vmw_resource_release_id(res);
+ vmw_fifo_resource_dec(dev_priv);
return 0;
}
@@ -579,15 +581,11 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_resource *res = &srf->res;
BUG_ON(!res_free);
- if (!dev_priv->has_mob)
- vmw_fifo_resource_inc(dev_priv);
ret = vmw_resource_init(dev_priv, res, true, res_free,
(dev_priv->has_mob) ? &vmw_gb_surface_func :
&vmw_legacy_surface_func);
if (unlikely(ret != 0)) {
- if (!dev_priv->has_mob)
- vmw_fifo_resource_dec(dev_priv);
res_free(res);
return ret;
}
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 3cd153c6d271..fc4adf3d34e8 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -92,7 +92,8 @@
* struct vga_switcheroo_client - registered client
* @pdev: client pci device
* @fb_info: framebuffer to which console is remapped on switching
- * @pwr_state: current power state
+ * @pwr_state: current power state if manual power control is used.
+ * For driver power control, call vga_switcheroo_pwr_state().
* @ops: client callbacks
* @id: client identifier. Determining the id requires the handler,
* so gpus are initially assigned VGA_SWITCHEROO_UNKNOWN_ID
@@ -104,8 +105,7 @@
* @list: client list
*
* Registered client. A client can be either a GPU or an audio device on a GPU.
- * For audio clients, the @fb_info, @active and @driver_power_control members
- * are bogus.
+ * For audio clients, the @fb_info and @active members are bogus.
*/
struct vga_switcheroo_client {
struct pci_dev *pdev;
@@ -331,8 +331,8 @@ EXPORT_SYMBOL(vga_switcheroo_register_client);
* @ops: client callbacks
* @id: client identifier
*
- * Register audio client (audio device on a GPU). The power state of the
- * client is assumed to be ON. Beforehand, vga_switcheroo_client_probe_defer()
+ * Register audio client (audio device on a GPU). The client is assumed
+ * to use runtime PM. Beforehand, vga_switcheroo_client_probe_defer()
* shall be called to ensure that all prerequisites are met.
*
* Return: 0 on success, -ENOMEM on memory allocation error.
@@ -341,7 +341,7 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
enum vga_switcheroo_client_id id)
{
- return register_client(pdev, ops, id | ID_BIT_AUDIO, false, false);
+ return register_client(pdev, ops, id | ID_BIT_AUDIO, false, true);
}
EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
@@ -406,6 +406,19 @@ bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev)
}
EXPORT_SYMBOL(vga_switcheroo_client_probe_defer);
+static enum vga_switcheroo_state
+vga_switcheroo_pwr_state(struct vga_switcheroo_client *client)
+{
+ if (client->driver_power_control)
+ if (pm_runtime_enabled(&client->pdev->dev) &&
+ pm_runtime_active(&client->pdev->dev))
+ return VGA_SWITCHEROO_ON;
+ else
+ return VGA_SWITCHEROO_OFF;
+ else
+ return client->pwr_state;
+}
+
/**
* vga_switcheroo_get_client_state() - obtain power state of a given client
* @pdev: client pci device
@@ -425,7 +438,7 @@ enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *pdev)
if (!client)
ret = VGA_SWITCHEROO_NOT_FOUND;
else
- ret = client->pwr_state;
+ ret = vga_switcheroo_pwr_state(client);
mutex_unlock(&vgasr_mutex);
return ret;
}
@@ -598,7 +611,7 @@ static int vga_switcheroo_show(struct seq_file *m, void *v)
client_is_vga(client) ? "" : "-Audio",
client->active ? '+' : ' ',
client->driver_power_control ? "Dyn" : "",
- client->pwr_state ? "Pwr" : "Off",
+ vga_switcheroo_pwr_state(client) ? "Pwr" : "Off",
pci_name(client->pdev));
i++;
}
@@ -641,10 +654,8 @@ static void set_audio_state(enum vga_switcheroo_client_id id,
struct vga_switcheroo_client *client;
client = find_client_from_id(&vgasr_priv.clients, id | ID_BIT_AUDIO);
- if (client && client->pwr_state != state) {
+ if (client)
client->ops->set_gpu_state(client->pdev, state);
- client->pwr_state = state;
- }
}
/* stage one happens before delay */
@@ -656,7 +667,7 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
if (!active)
return 0;
- if (new_client->pwr_state == VGA_SWITCHEROO_OFF)
+ if (vga_switcheroo_pwr_state(new_client) == VGA_SWITCHEROO_OFF)
vga_switchon(new_client);
vga_set_default_device(new_client->pdev);
@@ -675,7 +686,9 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
active->active = false;
- set_audio_state(active->id, VGA_SWITCHEROO_OFF);
+ /* let HDA controller autosuspend if GPU uses driver power control */
+ if (!active->driver_power_control)
+ set_audio_state(active->id, VGA_SWITCHEROO_OFF);
if (new_client->fb_info) {
struct fb_event event;
@@ -695,10 +708,12 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
if (new_client->ops->reprobe)
new_client->ops->reprobe(new_client->pdev);
- if (active->pwr_state == VGA_SWITCHEROO_ON)
+ if (vga_switcheroo_pwr_state(active) == VGA_SWITCHEROO_ON)
vga_switchoff(active);
- set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
+ /* let HDA controller autoresume if GPU uses driver power control */
+ if (!new_client->driver_power_control)
+ set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
new_client->active = true;
return 0;
@@ -939,11 +954,6 @@ EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
* Specifying nouveau.runpm=0, radeon.runpm=0 or amdgpu.runpm=0 on the kernel
* command line disables it.
*
- * When the driver decides to power up or down, it notifies vga_switcheroo
- * thereof so that it can (a) power the audio device on the GPU up or down,
- * and (b) update its internal power state representation for the device.
- * This is achieved by vga_switcheroo_set_dynamic_switch().
- *
* After the GPU has been suspended, the handler needs to be called to cut
* power to the GPU. Likewise it needs to reinstate power before the GPU
* can resume. This is achieved by vga_switcheroo_init_domain_pm_ops(),
@@ -951,8 +961,9 @@ EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
* calls to the handler.
*
* When the audio device resumes, the GPU needs to be woken. This is achieved
- * by vga_switcheroo_init_domain_pm_optimus_hdmi_audio(), which augments the
- * audio device's resume function.
+ * by a PCI quirk which calls device_link_add() to declare a dependency on the
+ * GPU. That way, the GPU is kept awake whenever and as long as the audio
+ * device is in use.
*
* On muxed machines, if the mux is initially switched to the discrete GPU,
* the user ends up with a black screen when the GPU powers down after boot.
@@ -978,35 +989,6 @@ static void vga_switcheroo_power_switch(struct pci_dev *pdev,
vgasr_priv.handler->power_state(client->id, state);
}
-/**
- * vga_switcheroo_set_dynamic_switch() - helper for driver power control
- * @pdev: client pci device
- * @dynamic: new power state
- *
- * Helper for GPUs whose power state is controlled by the driver's runtime pm.
- * When the driver decides to power up or down, it notifies vga_switcheroo
- * thereof using this helper so that it can (a) power the audio device on
- * the GPU up or down, and (b) update its internal power state representation
- * for the device.
- */
-void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev,
- enum vga_switcheroo_state dynamic)
-{
- struct vga_switcheroo_client *client;
-
- mutex_lock(&vgasr_mutex);
- client = find_client_from_pci(&vgasr_priv.clients, pdev);
- if (!client || !client->driver_power_control) {
- mutex_unlock(&vgasr_mutex);
- return;
- }
-
- client->pwr_state = dynamic;
- set_audio_state(client->id, dynamic);
- mutex_unlock(&vgasr_mutex);
-}
-EXPORT_SYMBOL(vga_switcheroo_set_dynamic_switch);
-
/* switcheroo power domain */
static int vga_switcheroo_runtime_suspend(struct device *dev)
{
@@ -1022,6 +1004,7 @@ static int vga_switcheroo_runtime_suspend(struct device *dev)
vgasr_priv.handler->switchto(VGA_SWITCHEROO_IGD);
mutex_unlock(&vgasr_priv.mux_hw_lock);
}
+ pci_bus_set_current_state(pdev->bus, PCI_D3cold);
vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
mutex_unlock(&vgasr_mutex);
return 0;
@@ -1035,6 +1018,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
mutex_lock(&vgasr_mutex);
vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON);
mutex_unlock(&vgasr_mutex);
+ pci_wakeup_bus(pdev->bus);
ret = dev->bus->pm->runtime_resume(dev);
if (ret)
return ret;
@@ -1076,69 +1060,3 @@ void vga_switcheroo_fini_domain_pm_ops(struct device *dev)
dev_pm_domain_set(dev, NULL);
}
EXPORT_SYMBOL(vga_switcheroo_fini_domain_pm_ops);
-
-static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct vga_switcheroo_client *client;
- struct device *video_dev = NULL;
- int ret;
-
- /* we need to check if we have to switch back on the video
- * device so the audio device can come back
- */
- mutex_lock(&vgasr_mutex);
- list_for_each_entry(client, &vgasr_priv.clients, list) {
- if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) &&
- client_is_vga(client)) {
- video_dev = &client->pdev->dev;
- break;
- }
- }
- mutex_unlock(&vgasr_mutex);
-
- if (video_dev) {
- ret = pm_runtime_get_sync(video_dev);
- if (ret && ret != 1)
- return ret;
- }
- ret = dev->bus->pm->runtime_resume(dev);
-
- /* put the reference for the gpu */
- if (video_dev) {
- pm_runtime_mark_last_busy(video_dev);
- pm_runtime_put_autosuspend(video_dev);
- }
- return ret;
-}
-
-/**
- * vga_switcheroo_init_domain_pm_optimus_hdmi_audio() - helper for driver
- * power control
- * @dev: audio client device
- * @domain: power domain
- *
- * Helper for GPUs whose power state is controlled by the driver's runtime pm.
- * When the audio device resumes, the GPU needs to be woken. This helper
- * augments the audio device's resume function to do that.
- *
- * Return: 0 on success, -EINVAL if no power management operations are
- * defined for this device.
- */
-int
-vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev,
- struct dev_pm_domain *domain)
-{
- /* copy over all the bus versions */
- if (dev->bus && dev->bus->pm) {
- domain->ops = *dev->bus->pm;
- domain->ops.runtime_resume =
- vga_switcheroo_runtime_resume_hdmi_audio;
-
- dev_pm_domain_set(dev, domain);
- return 0;
- }
- dev_pm_domain_set(dev, NULL);
- return -EINVAL;
-}
-EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_optimus_hdmi_audio);
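
With vga_switcheroo_set_dynamic_switch() and the optimus HDMI-audio PM domain removed, clients using driver power control no longer push power-state updates into vga_switcheroo; vga_switcheroo_pwr_state() derives the state from runtime PM on demand. A rough sketch of the registration such a GPU driver performs is shown below; the ops bodies are placeholders, not any real driver's callbacks.

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

static void example_set_gpu_state(struct pci_dev *pdev,
				  enum vga_switcheroo_state state)
{
	/* Only relevant for manually switched setups; with driver power
	 * control the current state is read back from runtime PM instead.
	 */
}

static bool example_can_switch(struct pci_dev *pdev)
{
	return true;	/* placeholder policy */
}

static const struct vga_switcheroo_client_ops example_ops = {
	.set_gpu_state	= example_set_gpu_state,
	.can_switch	= example_can_switch,
};

/* driver_power_control = true: the driver's runtime PM, not a manual
 * switch, decides when the GPU powers down.
 */
static int example_register_gpu(struct pci_dev *pdev)
{
	return vga_switcheroo_register_client(pdev, &example_ops, true);
}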
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 3bed6beda051..6a67cdbd0e6a 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1224,11 +1224,14 @@ static int pci_pm_runtime_suspend(struct device *dev)
int error;
/*
- * If pci_dev->driver is not set (unbound), the device should
- * always remain in D0 regardless of the runtime PM status
+ * If pci_dev->driver is not set (unbound), we leave the device in D0,
+ * but it may go to D3cold when the bridge above it runtime suspends.
+ * Save its config space in case that happens.
*/
- if (!pci_dev->driver)
+ if (!pci_dev->driver) {
+ pci_save_state(pci_dev);
return 0;
+ }
if (!pm || !pm->runtime_suspend)
return -ENOSYS;
@@ -1276,16 +1279,18 @@ static int pci_pm_runtime_resume(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
/*
- * If pci_dev->driver is not set (unbound), the device should
- * always remain in D0 regardless of the runtime PM status
+ * Restoring config space is necessary even if the device is not bound
+ * to a driver because although we left it in D0, it may have gone to
+ * D3cold when the bridge above it runtime suspended.
*/
+ pci_restore_standard_config(pci_dev);
+
if (!pci_dev->driver)
return 0;
if (!pm || !pm->runtime_resume)
return -ENOSYS;
- pci_restore_standard_config(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
pci_enable_wake(pci_dev, PCI_D0, false);
pci_fixup_device(pci_fixup_resume, pci_dev);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index f6a4dd10d9b0..bd6f156dc3cf 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -800,7 +800,7 @@ static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
* pci_wakeup_bus - Walk given bus and wake up devices on it
* @bus: Top bus of the subtree to walk.
*/
-static void pci_wakeup_bus(struct pci_bus *bus)
+void pci_wakeup_bus(struct pci_bus *bus)
{
if (bus)
pci_walk_bus(bus, pci_wakeup, NULL);
@@ -850,11 +850,11 @@ static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
}
/**
- * __pci_bus_set_current_state - Walk given bus and set current state of devices
+ * pci_bus_set_current_state - Walk given bus and set current state of devices
* @bus: Top bus of the subtree to walk.
* @state: state to be set
*/
-static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
+void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
if (bus)
pci_walk_bus(bus, __pci_dev_set_current_state, &state);
@@ -876,7 +876,7 @@ int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
ret = pci_platform_power_transition(dev, state);
/* Power off the bridge may power off the whole hierarchy */
if (!ret && state == PCI_D3cold)
- __pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
+ pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index fc734014206f..ec582d37c189 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -26,6 +26,7 @@
#include <linux/ktime.h>
#include <linux/mm.h>
#include <linux/platform_data/x86/apple.h>
+#include <linux/pm_runtime.h>
#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include "pci.h"
@@ -4832,3 +4833,41 @@ static void quirk_fsl_no_msi(struct pci_dev *pdev)
pdev->no_msi = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, quirk_fsl_no_msi);
+
+/*
+ * GPUs with integrated HDA controller for streaming audio to attached displays
+ * need a device link from the HDA controller (consumer) to the GPU (supplier)
+ * so that the GPU is powered up whenever the HDA controller is accessed.
+ * The GPU and HDA controller are functions 0 and 1 of the same PCI device.
+ * The device link stays in place until shutdown (or removal of the PCI device
+ * if it's hotplugged). Runtime PM is allowed by default on the HDA controller
+ * to prevent it from permanently keeping the GPU awake.
+ */
+static void quirk_gpu_hda(struct pci_dev *hda)
+{
+ struct pci_dev *gpu;
+
+ if (PCI_FUNC(hda->devfn) != 1)
+ return;
+
+ gpu = pci_get_domain_bus_and_slot(pci_domain_nr(hda->bus),
+ hda->bus->number,
+ PCI_DEVFN(PCI_SLOT(hda->devfn), 0));
+ if (!gpu || (gpu->class >> 16) != PCI_BASE_CLASS_DISPLAY) {
+ pci_dev_put(gpu);
+ return;
+ }
+
+ if (!device_link_add(&hda->dev, &gpu->dev,
+ DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME))
+ pci_err(hda, "cannot link HDA to GPU %s\n", pci_name(gpu));
+
+ pm_runtime_allow(&hda->dev);
+ pci_dev_put(gpu);
+}
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, PCI_ANY_ID,
+ PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_AMD, PCI_ANY_ID,
+ PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
+DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
+ PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8, quirk_gpu_hda);
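
The quirk relies on the generic device-links API, and the same pattern is available to any consumer driver whose supplier must be awake while the consumer is in use. A minimal, hypothetical probe-time sketch:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_runtime.h>

/* Tie the consumer's runtime PM to its supplier: resuming the consumer
 * resumes the supplier first, exactly as the HDA->GPU link above does.
 */
static int example_link_to_supplier(struct device *consumer,
				    struct device *supplier)
{
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME);
	if (!link)
		return -ENODEV;

	/* Opt the consumer in to runtime PM, as the quirk does for HDA. */
	pm_runtime_allow(consumer);
	return 0;
}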
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index 711fff9b6803..e9a1116d2f8e 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -41,7 +41,7 @@ struct analogix_dp_plat_data {
struct drm_connector *);
};
-int analogix_dp_psr_supported(struct analogix_dp_device *dp);
+int analogix_dp_psr_enabled(struct analogix_dp_device *dp);
int analogix_dp_enable_psr(struct analogix_dp_device *dp);
int analogix_dp_disable_psr(struct analogix_dp_device *dp);
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index b3b6d302ca8c..44f04233e3db 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -38,6 +38,18 @@ void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
int gamma_size);
+/**
+ * drm_color_lut_size - calculate the number of entries in the LUT
+ * @blob: blob containing the LUT
+ *
+ * Returns:
+ * The number of entries in the color LUT stored in @blob.
+ */
+static inline int drm_color_lut_size(const struct drm_property_blob *blob)
+{
+ return blob->length / sizeof(struct drm_color_lut);
+}
+
enum drm_color_encoding {
DRM_COLOR_YCBCR_BT601,
DRM_COLOR_YCBCR_BT709,
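
drm_color_lut_size() gives drivers the entry count of a gamma/degamma blob without open-coding the division. A hypothetical load routine (the hardware write below is a stand-in) might use it like this:

#include <linux/types.h>
#include <drm/drm_crtc.h>
#include <drm/drm_color_mgmt.h>

/* Hypothetical hardware hook, present only to give the loop a body. */
static void example_write_lut_entry(int i, u16 r, u16 g, u16 b);

static void example_load_gamma(const struct drm_crtc_state *state)
{
	struct drm_property_blob *blob = state->gamma_lut;
	struct drm_color_lut *lut;
	int i;

	if (!blob)
		return;

	lut = blob->data;
	for (i = 0; i < drm_color_lut_size(blob); i++)
		example_write_lut_entry(i, lut[i].red, lut[i].green,
					lut[i].blue);
}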
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 4de97e94ef9d..62903bae0221 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -288,6 +288,7 @@
#define DP_PSR_SUPPORT 0x070 /* XXX 1.2? */
# define DP_PSR_IS_SUPPORTED 1
# define DP_PSR2_IS_SUPPORTED 2 /* eDP 1.4 */
+# define DP_PSR2_WITH_Y_COORD_IS_SUPPORTED 3 /* eDP 1.4a */
#define DP_PSR_CAPS 0x071 /* XXX 1.2? */
# define DP_PSR_NO_TRAIN_ON_EXIT 1
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index 7ba3913f30b5..c34a3e8030e1 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -120,30 +120,6 @@ struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
void drm_mode_object_get(struct drm_mode_object *obj);
void drm_mode_object_put(struct drm_mode_object *obj);
-/**
- * drm_mode_object_reference - acquire a mode object reference
- * @obj: DRM mode object
- *
- * This is a compatibility alias for drm_mode_object_get() and should not be
- * used by new code.
- */
-static inline void drm_mode_object_reference(struct drm_mode_object *obj)
-{
- drm_mode_object_get(obj);
-}
-
-/**
- * drm_mode_object_unreference - release a mode object reference
- * @obj: DRM mode object
- *
- * This is a compatibility alias for drm_mode_object_put() and should not be
- * used by new code.
- */
-static inline void drm_mode_object_unreference(struct drm_mode_object *obj)
-{
- drm_mode_object_put(obj);
-}
-
int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t val);
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 2a4a42e59a47..e1a46e9991cc 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -196,21 +196,22 @@ static inline struct drm_printer drm_debug_printer(const char *prefix)
#define DRM_UT_STATE 0x40
#define DRM_UT_LEASE 0x80
-__printf(6, 7)
+__printf(3, 4)
void drm_dev_printk(const struct device *dev, const char *level,
- unsigned int category, const char *function_name,
- const char *prefix, const char *format, ...);
+ const char *format, ...);
__printf(3, 4)
-void drm_printk(const char *level, unsigned int category,
- const char *format, ...);
+void drm_dev_dbg(const struct device *dev, unsigned int category,
+ const char *format, ...);
+
+__printf(2, 3)
+void drm_dbg(unsigned int category, const char *format, ...);
+__printf(1, 2)
+void drm_err(const char *format, ...);
/* Macros to make printk easier */
#define _DRM_PRINTK(once, level, fmt, ...) \
- do { \
- printk##once(KERN_##level "[" DRM_NAME "] " fmt, \
- ##__VA_ARGS__); \
- } while (0)
+ printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
#define DRM_INFO(fmt, ...) \
_DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
@@ -233,10 +234,9 @@ void drm_printk(const char *level, unsigned int category,
* @fmt: printf() like format string.
*/
#define DRM_DEV_ERROR(dev, fmt, ...) \
- drm_dev_printk(dev, KERN_ERR, DRM_UT_NONE, __func__, " *ERROR*",\
- fmt, ##__VA_ARGS__)
+ drm_dev_printk(dev, KERN_ERR, "*ERROR* " fmt, ##__VA_ARGS__)
#define DRM_ERROR(fmt, ...) \
- drm_printk(KERN_ERR, DRM_UT_NONE, fmt, ##__VA_ARGS__)
+ drm_err(fmt, ##__VA_ARGS__)
/**
* Rate limited error output. Like DRM_ERROR() but won't flood the log.
@@ -257,8 +257,7 @@ void drm_printk(const char *level, unsigned int category,
DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
#define DRM_DEV_INFO(dev, fmt, ...) \
- drm_dev_printk(dev, KERN_INFO, DRM_UT_NONE, __func__, "", fmt, \
- ##__VA_ARGS__)
+ drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
({ \
@@ -275,53 +274,46 @@ void drm_printk(const char *level, unsigned int category,
* @dev: device pointer
* @fmt: printf() like format string.
*/
-#define DRM_DEV_DEBUG(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_CORE, __func__, "", fmt, \
- ##args)
+#define DRM_DEV_DEBUG(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
#define DRM_DEBUG(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_CORE, fmt, ##__VA_ARGS__)
+ drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_DRIVER(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_DRIVER, __func__, "", \
- fmt, ##args)
+#define DRM_DEV_DEBUG_DRIVER(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
#define DRM_DEBUG_DRIVER(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+ drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_KMS(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_KMS, __func__, "", fmt, \
- ##args)
-#define DRM_DEBUG_KMS(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_KMS(fmt, ...) \
+ drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_PRIME(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_PRIME, __func__, "", \
- fmt, ##args)
+#define DRM_DEV_DEBUG_PRIME(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
#define DRM_DEBUG_PRIME(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+ drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ATOMIC, __func__, "", \
- fmt, ##args)
+#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
#define DRM_DEBUG_ATOMIC(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+ drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_VBL(dev, fmt, args...) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_VBL, __func__, "", fmt, \
- ##args)
-#define DRM_DEBUG_VBL(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_VBL(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_VBL(fmt, ...) \
+ drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
#define DRM_DEBUG_LEASE(fmt, ...) \
- drm_printk(KERN_DEBUG, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+ drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
-#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, level, fmt, args...) \
+#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \
({ \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
if (__ratelimit(&_rs)) \
- drm_dev_printk(dev, KERN_DEBUG, DRM_UT_ ## level, \
- __func__, "", fmt, ##args); \
+ drm_dev_dbg(dev, category, fmt, ##__VA_ARGS__); \
})
/**
@@ -330,21 +322,28 @@ void drm_printk(const char *level, unsigned int category,
* @dev: device pointer
* @fmt: printf() like format string.
*/
-#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, args...) \
- DEV__DRM_DEFINE_DEBUG_RATELIMITED(dev, CORE, fmt, ##args)
-#define DRM_DEBUG_RATELIMITED(fmt, args...) \
- DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##args)
-#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, args...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRIVER, fmt, ##args)
-#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, args...) \
- DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##args)
-#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, args...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, KMS, fmt, ##args)
-#define DRM_DEBUG_KMS_RATELIMITED(fmt, args...) \
- DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##args)
-#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, args...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, PRIME, fmt, ##args)
-#define DRM_DEBUG_PRIME_RATELIMITED(fmt, args...) \
- DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##args)
+#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, ...) \
+ _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_CORE, \
+ fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, ...) \
+ _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_DRIVER, \
+ fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, ...) \
+ _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_KMS, \
+ fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, ...) \
+ _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_PRIME, \
+ fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_PRIME_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
#endif /* DRM_PRINT_H_ */
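
After this rework the macros no longer pass KERN_DEBUG, a category and __func__ on every call; that bookkeeping (and the drm_debug category filtering) is presumably handled inside the out-of-line drm_dev_dbg()/drm_dbg()/drm_err() helpers in drm_print.c, which shrinks every call site. Usage is unchanged; a short illustrative example, assuming a driver with a struct device at hand:

    #include <drm/drm_print.h>

    /* Illustrative only: the macro call signatures are unchanged. */
    static void example_debug_prints(struct device *dev)
    {
            DRM_DEV_DEBUG_KMS(dev, "mode set on pipe %d\n", 0);
            DRM_DEBUG_ATOMIC("commit took %d us\n", 42);
            DRM_DEV_ERROR(dev, "link training failed\n");
    }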
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 937757a8a568..d1423c7f3c73 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -209,7 +209,7 @@ struct drm_property_blob {
struct list_head head_global;
struct list_head head_file;
size_t length;
- unsigned char data[];
+ void *data;
};
struct drm_prop_enum_list {
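
struct drm_property_blob's data becomes a plain pointer to the payload instead of a flexible array member; blob readers still use data and length the same way. A small sketch of creating a blob and reading it back; the payload is arbitrary:

    #include <linux/err.h>
    #include <linux/string.h>
    #include <linux/bug.h>
    #include <drm/drm_property.h>

    /* Sketch: wrap driver data in a blob property and read it back. */
    static int example_blob_roundtrip(struct drm_device *dev,
                                      const void *payload, size_t len)
    {
            struct drm_property_blob *blob;

            blob = drm_property_create_blob(dev, len, payload);
            if (IS_ERR(blob))
                    return PTR_ERR(blob);

            /* blob->data points at the copied payload, blob->length is len */
            WARN_ON(memcmp(blob->data, payload, blob->length));

            drm_property_blob_put(blob);
            return 0;
    }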
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 8e2fb1ac4e0c..c67977aa1a0e 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -709,6 +709,10 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev);
+void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot);
+
+void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot);
+
/**
* ttm_bo_io
*
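
ttm_kmap_atomic_prot() and ttm_kunmap_atomic_prot() are exported so callers can map a single page atomically with a caller-chosen pgprot_t. A rough sketch of the pairing; the copy is illustrative and len must not exceed PAGE_SIZE:

    #include <linux/string.h>
    #include <linux/highmem.h>
    #include <drm/ttm/ttm_bo_api.h>

    /* Sketch: copy into one page through an atomic mapping with the
     * given protection; no sleeping between map and unmap. */
    static void example_copy_to_page(struct page *page, pgprot_t prot,
                                     const void *src, size_t len)
    {
            void *dst = ttm_kmap_atomic_prot(page, prot);

            memcpy(dst, src, len);
            ttm_kunmap_atomic_prot(dst, prot);
    }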
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 024a1beda008..ae42289662df 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1147,6 +1147,8 @@ void pci_pme_wakeup_bus(struct pci_bus *bus);
void pci_d3cold_enable(struct pci_dev *dev);
void pci_d3cold_disable(struct pci_dev *dev);
bool pcie_relaxed_ordering_enabled(struct pci_dev *dev);
+void pci_wakeup_bus(struct pci_bus *bus);
+void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state);
/* PCI Virtual Channel */
int pci_save_vc_state(struct pci_dev *dev);
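
pci_wakeup_bus() and pci_bus_set_current_state() are made public for the GPU runtime-PM changes elsewhere in this series: a driver whose device is powered off by platform firmware (so the PCI core never sees the transition) can record D3cold for every function on the bus and wake the bus back up on resume. A heavily simplified sketch, with the platform power sequencing left out:

    #include <linux/pci.h>

    /* Sketch only: the real suspend/resume paths do much more. */
    static void example_platform_power_off(struct pci_dev *pdev)
    {
            /* ... platform-specific power-off (e.g. ACPI method) ... */
            pci_bus_set_current_state(pdev->bus, PCI_D3cold);
    }

    static void example_platform_power_on(struct pci_dev *pdev)
    {
            /* ... platform-specific power-on ... */
            pci_wakeup_bus(pdev->bus);
    }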
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a6b30667a331..a637a7d8ce5b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -45,6 +45,7 @@
#define PCI_CLASS_MULTIMEDIA_VIDEO 0x0400
#define PCI_CLASS_MULTIMEDIA_AUDIO 0x0401
#define PCI_CLASS_MULTIMEDIA_PHONE 0x0402
+#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
#define PCI_CLASS_MULTIMEDIA_OTHER 0x0480
#define PCI_BASE_CLASS_MEMORY 0x05
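
The HD Audio class code moves from a private define in hdaudio.h (removed further down) into pci_ids.h so the fixups at the top of this excerpt can use it. For reference, such a class-based fixup looks like the following; the hook name is a placeholder, and the shift of 8 drops the programming-interface byte from dev->class before the comparison:

    #include <linux/pci.h>
    #include <linux/pci_ids.h>

    /* Placeholder hook, run once per matching HD-audio function. */
    static void example_gpu_hda_fixup(struct pci_dev *pdev)
    {
            /* e.g. tie the audio function's power state to its GPU */
    }

    DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
                                  PCI_CLASS_MULTIMEDIA_HD_AUDIO, 8,
                                  example_gpu_hda_fixup);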
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 960bedbdec87..77f0f0af3a71 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -168,11 +168,8 @@ int vga_switcheroo_process_delayed_switch(void);
bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev);
enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev);
-void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
-
int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
void vga_switcheroo_fini_domain_pm_ops(struct device *dev);
-int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
#else
static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
@@ -192,11 +189,8 @@ static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; }
static inline enum vga_switcheroo_state vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
-static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
-
static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
static inline void vga_switcheroo_fini_domain_pm_ops(struct device *dev) {}
-static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
#endif
#endif /* _LINUX_VGA_SWITCHEROO_H_ */
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 68169e3749de..5b2ed12f58ce 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -227,9 +227,6 @@ struct hdac_io_ops {
#define HDA_UNSOL_QUEUE_SIZE 64
#define HDA_MAX_CODECS 8 /* limit by controller side */
-/* HD Audio class code */
-#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
-
/*
* CORB/RIRB
*
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index e9b997a0ef27..0d5c49dc478c 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -55,6 +55,12 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07
#define ETNAVIV_PARAM_GPU_FEATURES_5 0x08
#define ETNAVIV_PARAM_GPU_FEATURES_6 0x09
+#define ETNAVIV_PARAM_GPU_FEATURES_7 0x0a
+#define ETNAVIV_PARAM_GPU_FEATURES_8 0x0b
+#define ETNAVIV_PARAM_GPU_FEATURES_9 0x0c
+#define ETNAVIV_PARAM_GPU_FEATURES_10 0x0d
+#define ETNAVIV_PARAM_GPU_FEATURES_11 0x0e
+#define ETNAVIV_PARAM_GPU_FEATURES_12 0x0f
#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10
#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11
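
The six additional feature words are queried through the existing GET_PARAM path, the same as FEATURES_0..6. A rough userspace sketch; error handling is trimmed and fd is assumed to be an open etnaviv render node:

    #include <sys/ioctl.h>
    #include <drm/etnaviv_drm.h>    /* header path depends on the installation */

    /* Sketch: read one feature word, e.g. ETNAVIV_PARAM_GPU_FEATURES_7. */
    static __u64 example_get_feature_word(int fd, __u32 pipe, __u32 param)
    {
            struct drm_etnaviv_param req = {
                    .pipe  = pipe,
                    .param = param,
            };

            if (ioctl(fd, DRM_IOCTL_ETNAVIV_GET_PARAM, &req))
                    return 0;

            return req.value;
    }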
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index bbbaffad772d..c06d0a5bdd80 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -201,10 +201,12 @@ struct drm_msm_gem_submit_bo {
#define MSM_SUBMIT_NO_IMPLICIT 0x80000000 /* disable implicit sync */
#define MSM_SUBMIT_FENCE_FD_IN 0x40000000 /* enable input fence_fd */
#define MSM_SUBMIT_FENCE_FD_OUT 0x20000000 /* enable output fence_fd */
+#define MSM_SUBMIT_SUDO 0x10000000 /* run submitted cmds from RB */
#define MSM_SUBMIT_FLAGS ( \
MSM_SUBMIT_NO_IMPLICIT | \
MSM_SUBMIT_FENCE_FD_IN | \
MSM_SUBMIT_FENCE_FD_OUT | \
+ MSM_SUBMIT_SUDO | \
0)
/* Each cmdstream submit consists of a table of buffers involved, and
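
MSM_SUBMIT_SUDO sits in the same flags word as the fence-fd bits, so opting in is just another OR into drm_msm_gem_submit.flags. An abbreviated userspace sketch, assuming an open msm render node and already-built bo/cmd tables; per the comment above it makes the kernel run the submitted commands from the ring buffer, so only trusted or debug userspace should set it:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/msm_drm.h>        /* header path depends on the installation */

    /* Sketch: submit with the SUDO flag set; error handling trimmed. */
    static int example_submit_sudo(int fd, __u32 queueid,
                                   struct drm_msm_gem_submit_bo *bos, __u32 nr_bos,
                                   struct drm_msm_gem_submit_cmd *cmds, __u32 nr_cmds)
    {
            struct drm_msm_gem_submit req = {
                    .flags   = MSM_PIPE_3D0 | MSM_SUBMIT_SUDO,
                    .queueid = queueid,
                    .bos     = (__u64)(uintptr_t)bos,
                    .nr_bos  = nr_bos,
                    .cmds    = (__u64)(uintptr_t)cmds,
                    .nr_cmds = nr_cmds,
            };

            return ioctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, &req);
    }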
diff --git a/scripts/coccinelle/api/drm-get-put.cocci b/scripts/coccinelle/api/drm-get-put.cocci
index 91fceb8f1fa2..ceb71ea7f61c 100644
--- a/scripts/coccinelle/api/drm-get-put.cocci
+++ b/scripts/coccinelle/api/drm-get-put.cocci
@@ -16,12 +16,6 @@ expression object;
@@
(
-- drm_mode_object_reference(object)
-+ drm_mode_object_get(object)
-|
-- drm_mode_object_unreference(object)
-+ drm_mode_object_put(object)
-|
- drm_connector_reference(object)
+ drm_connector_get(object)
|
@@ -62,10 +56,6 @@ position p;
@@
(
-drm_mode_object_unreference@p(object)
-|
-drm_mode_object_reference@p(object)
-|
drm_connector_unreference@p(object)
|
drm_connector_reference@p(object)
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index c71dcacea807..ec4e6b829ee2 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1227,6 +1227,7 @@ static void azx_vs_set_state(struct pci_dev *pci,
struct snd_card *card = pci_get_drvdata(pci);
struct azx *chip = card->private_data;
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+ struct hda_codec *codec;
bool disabled;
wait_for_completion(&hda->probe_wait);
@@ -1251,8 +1252,12 @@ static void azx_vs_set_state(struct pci_dev *pci,
dev_info(chip->card->dev, "%s via vga_switcheroo\n",
disabled ? "Disabling" : "Enabling");
if (disabled) {
- pm_runtime_put_sync_suspend(card->dev);
- azx_suspend(card->dev);
+ list_for_each_codec(codec, &chip->bus) {
+ pm_runtime_suspend(hda_codec_dev(codec));
+ pm_runtime_disable(hda_codec_dev(codec));
+ }
+ pm_runtime_suspend(card->dev);
+ pm_runtime_disable(card->dev);
/* when we get suspended by vga_switcheroo we end up in D3cold,
* however we have no ACPI handle, so pci/acpi can't put us there,
* put ourselves there */
@@ -1263,9 +1268,12 @@ static void azx_vs_set_state(struct pci_dev *pci,
"Cannot lock devices!\n");
} else {
snd_hda_unlock_devices(&chip->bus);
- pm_runtime_get_noresume(card->dev);
chip->disabled = false;
- azx_resume(card->dev);
+ pm_runtime_enable(card->dev);
+ list_for_each_codec(codec, &chip->bus) {
+ pm_runtime_enable(hda_codec_dev(codec));
+ pm_runtime_resume(hda_codec_dev(codec));
+ }
}
}
}
@@ -1295,6 +1303,7 @@ static void init_vga_switcheroo(struct azx *chip)
dev_info(chip->card->dev,
"Handle vga_switcheroo audio client\n");
hda->use_vga_switcheroo = 1;
+ chip->driver_caps |= AZX_DCAPS_PM_RUNTIME;
pci_dev_put(p);
}
}
@@ -1320,9 +1329,6 @@ static int register_vga_switcheroo(struct azx *chip)
return err;
hda->vga_switcheroo_registered = 1;
- /* register as an optimus hdmi audio power domain */
- vga_switcheroo_init_domain_pm_optimus_hdmi_audio(chip->card->dev,
- &hda->hdmi_pm_domain);
return 0;
}
#else
@@ -1351,10 +1357,8 @@ static int azx_free(struct azx *chip)
if (use_vga_switcheroo(hda)) {
if (chip->disabled && hda->probe_continued)
snd_hda_unlock_devices(&chip->bus);
- if (hda->vga_switcheroo_registered) {
+ if (hda->vga_switcheroo_registered)
vga_switcheroo_unregister_client(chip->pci);
- vga_switcheroo_fini_domain_pm_ops(chip->card->dev);
- }
}
if (bus->chip_init) {
@@ -2197,6 +2201,7 @@ static int azx_probe_continue(struct azx *chip)
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
struct hdac_bus *bus = azx_bus(chip);
struct pci_dev *pci = chip->pci;
+ struct hda_codec *codec;
int dev = chip->dev_index;
int err;
@@ -2278,8 +2283,17 @@ static int azx_probe_continue(struct azx *chip)
chip->running = 1;
azx_add_card_list(chip);
+
+ /*
+ * The discrete GPU cannot power down unless the HDA controller runtime
+ * suspends, so activate runtime PM on codecs even if power_save == 0.
+ */
+ if (use_vga_switcheroo(hda))
+ list_for_each_codec(codec, &chip->bus)
+ codec->auto_runtime_pm = 1;
+
snd_hda_set_power_save(&chip->bus, power_save * 1000);
- if (azx_has_pm_runtime(chip) || hda->use_vga_switcheroo)
+ if (azx_has_pm_runtime(chip))
pm_runtime_put_autosuspend(&pci->dev);
out_free:
diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h
index ff0c4d617bc1..e3a3d318d2e5 100644
--- a/sound/pci/hda/hda_intel.h
+++ b/sound/pci/hda/hda_intel.h
@@ -40,9 +40,6 @@ struct hda_intel {
unsigned int vga_switcheroo_registered:1;
unsigned int init_failed:1; /* delayed init failed */
- /* secondary power domain for hdmi audio under vga device */
- struct dev_pm_domain hdmi_pm_domain;
-
bool need_i915_power:1; /* the hda controller needs i915 power */
};