author		Dave Airlie <airlied@redhat.com>	2020-11-04 10:55:11 +1000
committer	Dave Airlie <airlied@redhat.com>	2020-11-04 11:49:10 +1000
commit		1cd260a7905e3ba2e5dfa39b110ad6cf8f466f49 (patch)
tree		bfb701fdb0fcb32f8e6e53fb1692361c8fa33a6a
parent		3cea11cd5e3b00d91caf0b4730194039b45c5891 (diff)
parent		4dfec0d1d7b9970f36931de714b379dbeaed83f8 (diff)
Merge tag 'drm-misc-next-2020-10-27' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.11:

UAPI Changes:
- doc: rules for EBUSY on non-blocking commits; requirements for fourcc
  modifiers; on parsing EDID
- fbdev/sbuslib: Remove unused FBIOSCURSOR32
- fourcc: deprecate DRM_FORMAT_MOD_NONE
- virtio: Support blob resources for memory allocations; Expose
  host-visible and cross-device features

Cross-subsystem Changes:
- devicetree: Add vendor prefix for Yes Optoelectronics, Shanghai Top
  Display Optoelectronics
- dma-buf: Add struct dma_buf_map that stores DMA pointer and I/O-memory
  flag; dma_buf_vmap()/vunmap() return address in dma_buf_map; Use
  struct_size() macro (a usage sketch of the new vmap interface follows
  this changelog)

Core Changes:
- atomic: pass full state to CRTC atomic enable/disable; warn for EBUSY
  during non-blocking commits
- dp: Prepare for DP 2.0 DPCD
- dp_mst: Receive extended DPCD caps
- dma-buf: Documentation
- doc: Format modifiers; dma-buf-map; Cleanups
- fbdev: Don't use compat_alloc_user_space(); mark as orphaned
- fb-helper: Take lock in drm_fb_helper_restore_work_fb()
- gem: Convert implementation and drivers to GEM object functions, remove
  GEM callbacks from struct drm_driver (except gem_prime_mmap)
- panel: Cleanups
- pci: Add legacy infix to drm_irq_by_busid()
- sched: Avoid infinite waits in drm_sched_entity_destroy()
- switcheroo: Cleanups
- ttm: Remove AGP support; Don't modify caching during swapout; Major
  refactoring of the implementation and API that affects all depending
  drivers; Add ttm_bo_wait_ctx(); Add ttm_bo_pin()/unpin() in favor of
  TTM_PL_FLAG_NO_EVICT; Remove ttm_bo_create(); Remove
  fault_reserve_notify() callback; Push move() implementation into
  drivers; Remove TTM_PAGE_FLAG_WRITE; Replace caching flags with
  init-time cache setting; Push ttm_tt_bind() into drivers; Replace
  move_notify() with delete_mem_notify(); No overlapping memcpy(); no
  more ttm_set_populated()
- vram-helper: Fix BO top-down placement; TTM-related changes; Init GEM
  object functions with defaults; Default placement in system memory;
  Cleanups

Driver Changes:
- amdgpu: Use GEM object functions
- armada: Use GEM object functions
- aspeed: Configure output via sysfs; Init struct drm_driver with
  DRM_GEM_CMA_DRIVEROPS
- ast: Reload LUT after FB format changes
- bridge: Add driver and DT bindings for anx7625; Cleanups
- bridge/dw-hdmi: Constify ops
- bridge/ti-sn65dsi86: Add retries for link training
- bridge/lvds-codec: Add support for regulator
- bridge/tc358768: Restore connector support; Cleanups
- display/ti,j721e-dss: Add DT properties assigned-clocks,
  assigned-clocks-parent and dma-coherent
- display/ti,am65x-dss: Add DT properties assigned-clocks,
  assigned-clocks-parent and dma-coherent
- etnaviv: Use GEM object functions
- exynos: Use GEM object functions
- fbdev: Cleanups and compiler fixes throughout framebuffer drivers
- fbdev/cirrusfb: Avoid division by 0
- gma500: Use GEM object functions; Fix double-free of connector; Cleanups
- hisilicon/hibmc: I2C-based DDC support; Use to_hibmc_drm_device();
  Cleanups
- i915: Use GEM object functions
- imx/dcss: Init driver with DRM_GEM_CMA_DRIVER_OPS; Cleanups
- ingenic: Reset pixel clock when parent clock changes; support reserved
  memory; Alloc F0 and F1 DMA channels at once; Support different pixel
  formats; Revert support for cached mmap buffers on F0/F1; support
  30-bit/24-bit/8-bit-palette modes
- komeda: Use DEFINE_SHOW_ATTRIBUTE
- mcde: Detect platform_get_irq() errors
- mediatek: Use GEM object functions
- msm: Use GEM object functions
- nouveau: Cleanups; TTM-related changes; Use GEM object functions
- omapdrm: Use GEM object functions
- panel: Add driver and DT bindings for Novatek nt36672a; Add driver and
  DT bindings for YTC700TLAG-05-201C; Add driver and DT bindings for TDO
  TL070WSH30; Cleanups
- panel/mantix: Fix reset; Fix deref of NULL pointer in mantix_get_modes()
- panel/otm8009a: Allow non-continuous dsi clock; Cleanups
- panel/rm68200: Allow non-continuous dsi clock; Fix mode to 50 FPS
- panfrost: Fix job timeout handling; Cleanups
- pl111: Use GEM object functions
- qxl: Cleanups; TTM-related changes; Pin new BOs with
  ttm_bo_init_reserved()
- radeon: Cleanups; TTM-related changes; Use GEM object functions
- rockchip: Use GEM object functions
- shmobile: Cleanups
- tegra: Use GEM object functions
- tidss: Set drm_plane_helper_funcs.prepare_fb
- tilcdc: Don't keep vblank interrupt enabled all the time
- tve200: Detect platform_get_irq() errors
- vc4: Use GEM object functions; Only register components once DSI is
  attached; Add Maxime as maintainer
- vgem: Use GEM object functions
- via: Simplify critical section in via_mem_alloc()
- virtgpu: Use GEM object functions
- virtio: Implement blob resources, host-visible and cross-device
  features; Support mapping of host-allocated resources; Use UUID API;
  Cleanups
- vkms: Use GEM object functions; Switch to SHMEM
- vmwgfx: TTM-related changes; Inline ttm_bo_swapout_all()
- xen: Use GEM object functions
- xlnx: Use GEM object functions

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20201027100936.GA4858@linux-uq9g
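
To illustrate the dma-buf interface change noted above: dma_buf_vmap() now
fills in a struct dma_buf_map instead of returning a bare pointer. A minimal
importer-side sketch, assuming a hypothetical caller that already holds a
reference on the buffer; the functions and fields are the ones added or
reworked by this series, everything else is illustrative:

    #include <linux/dma-buf.h>
    #include <linux/dma-buf-map.h>

    static int example_vmap_access(struct dma_buf *dmabuf)
    {
            struct dma_buf_map map;
            int ret;

            /* was: void *vaddr = dma_buf_vmap(dmabuf); with NULL on error */
            ret = dma_buf_vmap(dmabuf, &map);
            if (ret)
                    return ret;

            /*
             * struct dma_buf_map carries either a system-memory pointer
             * (map.vaddr) or an I/O-memory pointer (map.vaddr_iomem);
             * map.is_iomem says which one is valid, so framebuffers in
             * I/O space are no longer accessed through a plain void *.
             */

            dma_buf_vunmap(dmabuf, &map);
            return 0;
    }
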
-rw-r--r--Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml95
-rw-r--r--Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml87
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-simple.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml11
-rw-r--r--Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml11
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml4
-rw-r--r--Documentation/driver-api/dma-buf.rst9
-rw-r--r--Documentation/gpu/drm-mm.rst4
-rw-r--r--Documentation/gpu/todo.rst12
-rw-r--r--Documentation/gpu/vkms.rst99
-rw-r--r--MAINTAINERS11
-rw-r--r--drivers/dma-buf/dma-buf.c63
-rw-r--r--drivers/dma-buf/dma-resv.c2
-rw-r--r--drivers/dma-buf/heaps/heap-helpers.c10
-rw-r--r--drivers/gpu/drm/Kconfig1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c87
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c157
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c2
-rw-r--r--drivers/gpu/drm/arc/arcpgu_crtc.c4
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c8
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c16
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.h2
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c4
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c6
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c8
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c3
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c12
-rw-r--r--drivers/gpu/drm/armada/armada_gem.h2
-rw-r--r--drivers/gpu/drm/aspeed/Kconfig1
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx.h2
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c78
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c23
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c4
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c6
-rw-r--r--drivers/gpu/drm/bridge/analogix/Kconfig9
-rw-r--r--drivers/gpu/drm/bridge/analogix/Makefile1
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c2
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c1850
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.h390
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c2
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c107
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c43
-rw-r--r--drivers/gpu/drm/drm_atomic.c39
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c24
-rw-r--r--drivers/gpu/drm/drm_connector.c5
-rw-r--r--drivers/gpu/drm/drm_dp_aux_dev.c2
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c7
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c26
-rw-r--r--drivers/gpu/drm/drm_fourcc.c1
-rw-r--r--drivers/gpu/drm/drm_gem.c53
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c28
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c17
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c175
-rw-r--r--drivers/gpu/drm/drm_internal.h8
-rw-r--r--drivers/gpu/drm/drm_ioctl.c2
-rw-r--r--drivers/gpu/drm/drm_pci.c6
-rw-r--r--drivers/gpu/drm/drm_prime.c29
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c13
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c19
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c15
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c6
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c7
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c2
-rw-r--r--drivers/gpu/drm/gma500/gem.c18
-rw-r--r--drivers/gpu/drm/gma500/gem.h3
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c9
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Makefile2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c87
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c18
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h33
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c99
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c42
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c13
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c21
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h3
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c18
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c14
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c3
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-crtc.c9
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-kms.c14
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-plane.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c6
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm-drv.c272
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.h3
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c11
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c7
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c4
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c13
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h1
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c19
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_kms.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c266
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_prime.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c16
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c18
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.h2
-rw-r--r--drivers/gpu/drm/panel/Kconfig21
-rw-r--r--drivers/gpu/drm/panel/Makefile2
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672a.c711
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c20
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c14
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c7
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c29
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c2
-rw-r--r--drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c250
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c7
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c7
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c8
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c62
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c5
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c11
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h1
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c72
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h25
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c104
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c23
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c126
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h7
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c176
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c13
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c6
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c3
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c4
-rw-r--r--drivers/gpu/drm/stm/ltdc.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c4
-rw-r--r--drivers/gpu/drm/tegra/dc.c8
-rw-r--r--drivers/gpu/drm/tegra/drm.c4
-rw-r--r--drivers/gpu/drm/tegra/gem.c31
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c6
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c56
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c38
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.h7
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c192
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c103
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c40
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c89
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c77
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c10
-rw-r--r--drivers/gpu/drm/ttm/ttm_set_memory.h (renamed from include/drm/ttm/ttm_set_memory.h)66
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c250
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c4
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c21
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c12
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h1
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c25
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c9
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c21
-rw-r--r--drivers/gpu/drm/via/via_mm.c2
-rw-r--r--drivers/gpu/drm/virtio/Makefile2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c26
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h79
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c185
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c34
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c37
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c23
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c46
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c156
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vram.c164
-rw-r--r--drivers/gpu/drm/vkms/Makefile1
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c17
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c4
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c30
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h29
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c248
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c13
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c95
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c20
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_thp.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c197
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c2
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c44
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.h2
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_gem.c15
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c6
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c14
-rw-r--r--drivers/gpu/drm/zte/zx_vou.c4
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c7
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c17
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c19
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c21
-rw-r--r--drivers/misc/fastrpc.c6
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c4
-rw-r--r--drivers/video/fbdev/cirrusfb.c3
-rw-r--r--drivers/video/fbdev/core/fbmem.c44
-rw-r--r--drivers/video/fbdev/fsl-diu-fb.c3
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c8
-rw-r--r--drivers/video/fbdev/mx3fb.c2
-rw-r--r--drivers/video/fbdev/nvidia/nv_of.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c7
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c10
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c10
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c10
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c16
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c6
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/video-pll.c20
-rw-r--r--drivers/video/fbdev/sbuslib.c124
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c6
-rw-r--r--drivers/video/fbdev/sis/300vtbl.h2
-rw-r--r--drivers/video/fbdev/sis/sis_accel.h14
-rw-r--r--include/drm/drm_dp_helper.h72
-rw-r--r--include/drm/drm_drv.h85
-rw-r--r--include/drm/drm_gem.h2
-rw-r--r--include/drm/drm_gem_vram_helper.h3
-rw-r--r--include/drm/drm_mode_config.h13
-rw-r--r--include/drm/drm_modeset_helper_vtables.h13
-rw-r--r--include/drm/drm_prime.h5
-rw-r--r--include/drm/ttm/ttm_bo_api.h83
-rw-r--r--include/drm/ttm/ttm_bo_driver.h91
-rw-r--r--include/drm/ttm/ttm_caching.h34
-rw-r--r--include/drm/ttm/ttm_page_alloc.h12
-rw-r--r--include/drm/ttm/ttm_placement.h15
-rw-r--r--include/drm/ttm/ttm_resource.h12
-rw-r--r--include/drm/ttm/ttm_tt.h82
-rw-r--r--include/linux/dma-buf-map.h193
-rw-r--r--include/linux/dma-buf.h14
-rw-r--r--include/linux/platform_data/shmob_drm.h2
-rw-r--r--include/uapi/drm/drm_fourcc.h41
-rw-r--r--include/uapi/drm/virtgpu_drm.h39
-rw-r--r--include/uapi/linux/virtio_gpu.h78
291 files changed, 7523 insertions, 3257 deletions
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
new file mode 100644
index 000000000000..60585a4fc22b
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/analogix,anx7625.yaml
@@ -0,0 +1,95 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Analogix Semiconductor, Inc.
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/bridge/analogix,anx7625.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Analogix ANX7625 SlimPort (4K Mobile HD Transmitter)
+
+maintainers:
+ - Xin Ji <xji@analogixsemi.com>
+
+description: |
+ The ANX7625 is an ultra-low power 4K Mobile HD Transmitter
+ designed for portable devices.
+
+properties:
+ compatible:
+ items:
+ - const: analogix,anx7625
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: used for interrupt pin B8.
+ maxItems: 1
+
+ enable-gpios:
+ description: used for power on chip control, POWER_EN pin D2.
+ maxItems: 1
+
+ reset-gpios:
+ description: used for reset chip control, RESET_N pin B7.
+ maxItems: 1
+
+ ports:
+ type: object
+
+ properties:
+ port@0:
+ type: object
+ description:
+ Video port for MIPI DSI input.
+
+ port@1:
+ type: object
+ description:
+ Video port for panel or connector.
+
+ required:
+ - port@0
+ - port@1
+
+required:
+ - compatible
+ - reg
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ encoder@58 {
+ compatible = "analogix,anx7625";
+ reg = <0x58>;
+ enable-gpios = <&pio 45 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&pio 73 GPIO_ACTIVE_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mipi2dp_bridge_in: port@0 {
+ reg = <0>;
+ anx7625_in: endpoint {
+ remote-endpoint = <&mipi_dsi>;
+ };
+ };
+
+ mipi2dp_bridge_out: port@1 {
+ reg = <1>;
+ anx7625_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
new file mode 100644
index 000000000000..d2170de6b723
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
@@ -0,0 +1,87 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/novatek,nt36672a.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Novatek NT36672A based DSI display Panels
+
+maintainers:
+ - Sumit Semwal <sumit.semwal@linaro.org>
+
+description: |
+ The nt36672a IC from Novatek is a generic DSI Panel IC used to drive dsi
+ panels.
+ Right now, support is added only for a Tianma FHD+ LCD display panel with a
+ resolution of 1080x2246. It is a video mode DSI panel.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - tianma,fhd-video
+ - const: novatek,nt36672a
+ description: This indicates the panel manufacturer of the panel that is
+ in turn using the NT36672A panel driver. This compatible string
+ determines how the NT36672A panel driver is configured for the indicated
+ panel. The novatek,nt36672a compatible shall always be provided as a fallback.
+
+ reset-gpios:
+ description: phandle of gpio for reset line - This should be 8mA, gpio
+ can be configured using mux, pinctrl, pinctrl-names (active high)
+
+ vddio-supply:
+ description: phandle of the regulator that provides the supply voltage
+ Power IC supply
+
+ vddpos-supply:
+ description: phandle of the positive boost supply regulator
+
+ vddneg-supply:
+ description: phandle of the negative boost supply regulator
+
+ reg: true
+ port: true
+
+required:
+ - compatible
+ - reg
+ - vddi0-supply
+ - vddpos-supply
+ - vddneg-supply
+ - reset-gpios
+ - port
+
+unevaluatedProperties: false
+
+examples:
+ - |+
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "tianma,fhd-video", "novatek,nt36672a";
+ reg = <0>;
+ vddi0-supply = <&vreg_l14a_1p88>;
+ vddpos-supply = <&lab>;
+ vddneg-supply = <&ibb>;
+
+ reset-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port {
+ tianma_nt36672a_in_0: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
index c0dd9fa29f1d..a29ab65507f0 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
@@ -47,6 +47,8 @@ properties:
- panasonic,vvx10f004b00
# Panasonic 10" WUXGA TFT LCD panel
- panasonic,vvx10f034n00
+ # Shanghai Top Display Optoelectronics 7" TL070WSH30 1024x600 TFT LCD panel
+ - tdo,tl070wsh30
reg:
maxItems: 1
@@ -54,6 +56,7 @@ properties:
backlight: true
enable-gpios: true
+ reset-gpios: true
port: true
power-supply: true
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index edb53ab0d9eb..f9750b0b6708 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -282,6 +282,8 @@ properties:
- vxt,vl050-8048nt-c01
# Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel
- winstar,wf35ltiacd
+ # Yes Optoelectronics YTC700TLAG-05-201C 7" TFT LCD panel
+ - yes-optoelectronics,ytc700tlag-05-201c
backlight: true
enable-gpios: true
diff --git a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
index 4f9185462ed3..4dc30738ee57 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
@@ -55,6 +55,14 @@ properties:
- const: vp1
- const: vp2
+ assigned-clocks:
+ minItems: 1
+ maxItems: 3
+
+ assigned-clock-parents:
+ minItems: 1
+ maxItems: 3
+
interrupts:
maxItems: 1
@@ -62,6 +70,9 @@ properties:
maxItems: 1
description: phandle to the associated power domain
+ dma-coherent:
+ type: boolean
+
ports:
type: object
description:
diff --git a/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
index 173730d56334..c9a947d55fa4 100644
--- a/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
+++ b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
@@ -77,6 +77,14 @@ properties:
- const: vp3
- const: vp4
+ assigned-clocks:
+ minItems: 1
+ maxItems: 5
+
+ assigned-clock-parents:
+ minItems: 1
+ maxItems: 5
+
interrupts:
items:
- description: common_m DSS Master common
@@ -95,6 +103,9 @@ properties:
maxItems: 1
description: phandle to the associated power domain
+ dma-coherent:
+ type: boolean
+
ports:
type: object
description:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 2735be1a8470..12e1419823d4 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -1053,6 +1053,8 @@ patternProperties:
description: Trusted Computing Group
"^tcl,.*":
description: Toby Churchill Ltd.
+ "^tdo,.*":
+ description: Shanghai Top Display Optoelectronics Co., Ltd
"^technexion,.*":
description: TechNexion
"^technologic,.*":
@@ -1210,6 +1212,8 @@ patternProperties:
description: Shenzhen Xunlong Software CO.,Limited
"^xylon,.*":
description: Xylon
+ "^yes-optoelectronics,.*":
+ description: Yes Optoelectronics Co.,Ltd.
"^ylm,.*":
description: Shenzhen Yangliming Electronic Technology Co., Ltd.
"^yna,.*":
diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst
index 4144b669e80c..d6b2a195dbed 100644
--- a/Documentation/driver-api/dma-buf.rst
+++ b/Documentation/driver-api/dma-buf.rst
@@ -115,6 +115,15 @@ Kernel Functions and Structures Reference
.. kernel-doc:: include/linux/dma-buf.h
:internal:
+Buffer Mapping Helpers
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: include/linux/dma-buf-map.h
+ :doc: overview
+
+.. kernel-doc:: include/linux/dma-buf-map.h
+ :internal:
+
Reservation Objects
-------------------
diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index 9abee1589c1e..21be6deadc12 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -182,11 +182,11 @@ acquired and release by calling drm_gem_object_get() and drm_gem_object_put()
respectively.
When the last reference to a GEM object is released the GEM core calls
-the :c:type:`struct drm_driver <drm_driver>` gem_free_object_unlocked
+the :c:type:`struct drm_gem_object_funcs <gem_object_funcs>` free
operation. That operation is mandatory for GEM-enabled drivers and must
free the GEM object and all associated resources.
-void (\*gem_free_object) (struct drm_gem_object \*obj); Drivers are
+void (\*free) (struct drm_gem_object \*obj); Drivers are
responsible for freeing all GEM object resources. This includes the
resources created by the GEM core, which need to be released with
drm_gem_object_release().
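
As a sketch of the converted interface described above (the foo_ names are
hypothetical; amdgpu's real conversion appears further down in this diff),
the free operation now lives in a per-object function table that the driver
assigns when it creates the object, rather than in struct drm_driver:

    static void foo_gem_free(struct drm_gem_object *obj)
    {
            struct foo_bo *bo = to_foo_bo(obj);  /* hypothetical driver type */

            drm_gem_object_release(obj);         /* GEM-core resources */
            kfree(bo);                           /* driver resources */
    }

    static const struct drm_gem_object_funcs foo_gem_funcs = {
            .free = foo_gem_free,
    };

    static int foo_gem_create(struct foo_bo *bo)
    {
            /* ... drm_gem_object_init() etc. ... */
            bo->base.funcs = &foo_gem_funcs;  /* instead of drm_driver callbacks */
            return 0;
    }
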
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index b0ea17da8ff6..700637e25ecd 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -149,7 +149,7 @@ have to keep track of that lock and either call ``unreference`` or
``unreference_locked`` depending upon context.
Core GEM doesn't have a need for ``struct_mutex`` any more since kernel 4.8,
-and there's a ``gem_free_object_unlocked`` callback for any drivers which are
+and there's a GEM object ``free`` callback for any drivers which are
entirely ``struct_mutex`` free.
For drivers that need ``struct_mutex`` it should be replaced with a driver-
@@ -289,11 +289,8 @@ struct drm_gem_object_funcs
---------------------------
GEM objects can now have a function table instead of having the callbacks on the
-DRM driver struct. This is now the preferred way and drivers can be moved over.
-
-We also need a 2nd version of the CMA define that doesn't require the
-vmapping to be present (different hook for prime importing). Plus this needs to
-be rolled out to all drivers using their own implementations, too.
+DRM driver struct. This is now the preferred way. Callbacks in drivers have been
+converted, except for struct drm_driver.gem_prime_mmap.
Level: Intermediate
@@ -518,9 +515,6 @@ There's a bunch of issues with it:
this (together with the drm_minor->drm_device move) would allow us to remove
debugfs_init.
-- Drop the return code and error checking from all debugfs functions. Greg KH is
- working on this already.
-
Contact: Daniel Vetter
Level: Intermediate
diff --git a/Documentation/gpu/vkms.rst b/Documentation/gpu/vkms.rst
index 61586fc861bb..13bab1d93bb3 100644
--- a/Documentation/gpu/vkms.rst
+++ b/Documentation/gpu/vkms.rst
@@ -10,36 +10,24 @@
TODO
====
-CRC API Improvements
---------------------
-
-- Optimize CRC computation ``compute_crc()`` and plane blending ``blend()``
-
-- Use the alpha value to blend vaddr_src with vaddr_dst instead of
- overwriting it in ``blend()``.
-
-- Add igt test to check cleared alpha value for XRGB plane format.
-
-- Add igt test to check extreme alpha values i.e. fully opaque and fully
- transparent (intermediate values are affected by hw-specific rounding modes).
-
-Runtime Configuration
----------------------
-
-We want to be able to reconfigure vkms instance without having to reload the
-module. Use/Test-cases:
-
-- Hotplug/hotremove connectors on the fly (to be able to test DP MST handling of
- compositors).
+If you want to do any of the items listed below, please share your interest
+with VKMS maintainers.
-- Configure planes/crtcs/connectors (we'd need some code to have more than 1 of
- them first).
+IGT better support
+------------------
-- Change output configuration: Plug/unplug screens, change EDID, allow changing
- the refresh rate.
+- Investigate: (1) test cases on kms_plane that are failing due to timeout on
+ capturing CRC; (2) when running kms_flip test cases in sequence, some
+ successful individual test cases are failing randomly.
-The currently proposed solution is to expose vkms configuration through
-configfs. All existing module options should be supported through configfs too.
+- VKMS already has support for vblanks simulated via hrtimers, which can be
+ tested with kms_flip test; in some way, we can say that VKMS already mimics
+ the real hardware vblank. However, we also have virtual hardware that does
+ not support vblank interrupt and completes page_flip events right away; in
+ this case, compositor developers may end up creating a busy loop on virtual
+ hardware. It would be useful to support Virtual Hardware behavior in VKMS
+ because this can help compositor developers to test their features in
+ multiple scenarios.
Add Plane Features
------------------
@@ -55,34 +43,50 @@ There's lots of plane features we could add support for:
- Additional buffer formats, especially YUV formats for video like NV12.
Low/high bpp RGB formats would also be interesting.
-- Async updates (currently only possible on cursor plane using the legacy cursor
- api).
+- Async updates (currently only possible on cursor plane using the legacy
+ cursor api).
+
+For all of these, we also want to review the igt test coverage and make sure
+all relevant igt testcases work on vkms.
+
+Prime Buffer Sharing
+--------------------
-For all of these, we also want to review the igt test coverage and make sure all
-relevant igt testcases work on vkms.
+- Syzbot report - WARNING in vkms_gem_free_object:
+ https://syzkaller.appspot.com/bug?extid=e7ad70d406e74d8fc9d0
+
+Runtime Configuration
+---------------------
+
+We want to be able to reconfigure vkms instance without having to reload the
+module. Use/Test-cases:
+
+- Hotplug/hotremove connectors on the fly (to be able to test DP MST handling
+ of compositors).
+
+- Configure planes/crtcs/connectors (we'd need some code to have more than 1 of
+ them first).
+
+- Change output configuration: Plug/unplug screens, change EDID, allow changing
+ the refresh rate.
+
+The currently proposed solution is to expose vkms configuration through
+configfs. All existing module options should be supported through configfs
+too.
Writeback support
-----------------
-Currently vkms only computes a CRC for each frame. Once we have additional plane
-features, we could write back the entire composited frame, and expose it as:
+- The writeback and CRC capture operations share the composer_enabled
+ boolean to ensure vblanks. When these operations work together,
+ composer_enabled probably needs to refcount the composer state to work
+ properly.
-- Writeback connector. This is useful for testing compositors if you don't have
- hardware with writeback support.
+- Add support for cloned writeback outputs and related test cases using a
+ cloned output in the IGT kms_writeback.
- As a v4l device. This is useful for debugging compositors on special vkms
configurations, so that developers see what's really going on.
-Prime Buffer Sharing
---------------------
-
-We already have vgem, which is a gem driver for testing rendering, similar to
-how vkms is for testing the modeset side. Adding buffer sharing support to vkms
-allows us to test them together, to test synchronization and lots of other
-features. Also, this allows compositors to test whether they work correctly on
-SoC chips, where the display and rendering is very often split between 2
-drivers.
-
Output Features
---------------
@@ -93,7 +97,10 @@ Output Features
- Add support for link status, so that compositors can validate their runtime
fallbacks when e.g. a Display Port link goes bad.
-- All the hotplug handling describe under "Runtime Configuration".
+CRC API Improvements
+--------------------
+
+- Optimize CRC computation ``compute_crc()`` and plane blending ``blend()``
Atomic Check using eBPF
-----------------------
diff --git a/MAINTAINERS b/MAINTAINERS
index b516bb34a8d5..71e29dc0ab9d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5576,6 +5576,13 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
F: drivers/gpu/drm/panel/panel-novatek-nt35510.c
+DRM DRIVER FOR NOVATEK NT36672A PANELS
+M: Sumit Semwal <sumit.semwal@linaro.org>
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
+F: drivers/gpu/drm/panel/panel-novatek-nt36672a.c
+
DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
M: Ben Skeggs <bskeggs@redhat.com>
L: dri-devel@lists.freedesktop.org
@@ -5955,6 +5962,7 @@ F: include/uapi/drm/v3d_drm.h
DRM DRIVERS FOR VC4
M: Eric Anholt <eric@anholt.net>
+M: Maxime Ripard <mripard@kernel.org>
S: Supported
T: git git://github.com/anholt/linux
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -6911,10 +6919,9 @@ F: drivers/net/wan/dlci.c
F: drivers/net/wan/sdla.c
FRAMEBUFFER LAYER
-M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
L: dri-devel@lists.freedesktop.org
L: linux-fbdev@vger.kernel.org
-S: Maintained
+S: Orphan
Q: http://patchwork.kernel.org/project/linux-fbdev/list/
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/fb/
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 844967f98866..556f62e8b196 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -851,6 +851,9 @@ EXPORT_SYMBOL_GPL(dma_buf_unpin);
* Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
* on error. May return -EINTR if it is interrupted by a signal.
*
+ * On success, the DMA addresses and lengths in the returned scatterlist are
+ * PAGE_SIZE aligned.
+ *
* A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
* the underlying backing storage is pinned for as long as a mapping exists,
* therefore users/importers should not hold onto a mapping for undue amounts of
@@ -904,6 +907,24 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
attach->dir = direction;
}
+#ifdef CONFIG_DMA_API_DEBUG
+ {
+ struct scatterlist *sg;
+ u64 addr;
+ int len;
+ int i;
+
+ for_each_sgtable_dma_sg(sg_table, sg, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
+ pr_debug("%s: addr %llx or len %x is not page aligned!\n",
+ __func__, addr, len);
+ }
+ }
+ }
+#endif /* CONFIG_DMA_API_DEBUG */
+
return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
@@ -1188,68 +1209,72 @@ EXPORT_SYMBOL_GPL(dma_buf_mmap);
* dma_buf_vmap - Create virtual mapping for the buffer object into kernel
* address space. Same restrictions as for vmap and friends apply.
* @dmabuf: [in] buffer to vmap
+ * @map: [out] returns the vmap pointer
*
* This call may fail due to lack of virtual mapping address space.
* These calls are optional in drivers. The intended use for them
* is for mapping objects linear in kernel space for high use objects.
* Please attempt to use kmap/kunmap before thinking about these interfaces.
*
- * Returns NULL on error.
+ * Returns 0 on success, or a negative errno code otherwise.
*/
-void *dma_buf_vmap(struct dma_buf *dmabuf)
+int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
- void *ptr;
+ struct dma_buf_map ptr;
+ int ret = 0;
+
+ dma_buf_map_clear(map);
if (WARN_ON(!dmabuf))
- return NULL;
+ return -EINVAL;
if (!dmabuf->ops->vmap)
- return NULL;
+ return -EINVAL;
mutex_lock(&dmabuf->lock);
if (dmabuf->vmapping_counter) {
dmabuf->vmapping_counter++;
- BUG_ON(!dmabuf->vmap_ptr);
- ptr = dmabuf->vmap_ptr;
+ BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
+ *map = dmabuf->vmap_ptr;
goto out_unlock;
}
- BUG_ON(dmabuf->vmap_ptr);
+ BUG_ON(dma_buf_map_is_set(&dmabuf->vmap_ptr));
- ptr = dmabuf->ops->vmap(dmabuf);
- if (WARN_ON_ONCE(IS_ERR(ptr)))
- ptr = NULL;
- if (!ptr)
+ ret = dmabuf->ops->vmap(dmabuf, &ptr);
+ if (WARN_ON_ONCE(ret))
goto out_unlock;
dmabuf->vmap_ptr = ptr;
dmabuf->vmapping_counter = 1;
+ *map = dmabuf->vmap_ptr;
+
out_unlock:
mutex_unlock(&dmabuf->lock);
- return ptr;
+ return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);
/**
* dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
* @dmabuf: [in] buffer to vunmap
- * @vaddr: [in] vmap to vunmap
+ * @map: [in] vmap pointer to vunmap
*/
-void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
if (WARN_ON(!dmabuf))
return;
- BUG_ON(!dmabuf->vmap_ptr);
+ BUG_ON(dma_buf_map_is_null(&dmabuf->vmap_ptr));
BUG_ON(dmabuf->vmapping_counter == 0);
- BUG_ON(dmabuf->vmap_ptr != vaddr);
+ BUG_ON(!dma_buf_map_is_equal(&dmabuf->vmap_ptr, map));
mutex_lock(&dmabuf->lock);
if (--dmabuf->vmapping_counter == 0) {
if (dmabuf->ops->vunmap)
- dmabuf->ops->vunmap(dmabuf, vaddr);
- dmabuf->vmap_ptr = NULL;
+ dmabuf->ops->vunmap(dmabuf, map);
+ dma_buf_map_clear(&dmabuf->vmap_ptr);
}
mutex_unlock(&dmabuf->lock);
}
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index 1c8f2581cb09..bb5a42b10c29 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -63,7 +63,7 @@ static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
struct dma_resv_list *list;
- list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
+ list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
if (!list)
return NULL;
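
For context on the struct_size() conversion above: the macro computes the
allocation size of a structure with a trailing flexible array, like the
open-coded offsetof() form it replaces, but saturates on arithmetic overflow
instead of wrapping. A minimal sketch with a hypothetical structure:

    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct item_list {
            unsigned int count;
            u64 items[];  /* flexible array member */
    };

    static struct item_list *item_list_alloc(unsigned int n)
    {
            struct item_list *list;

            /* header plus n trailing elements; saturates to SIZE_MAX on
             * overflow, so kmalloc() fails instead of under-allocating */
            list = kmalloc(struct_size(list, items, n), GFP_KERNEL);
            if (list)
                    list->count = n;
            return list;
    }
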
diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c
index d0696cf937af..fcf4ce3e2cbb 100644
--- a/drivers/dma-buf/heaps/heap-helpers.c
+++ b/drivers/dma-buf/heaps/heap-helpers.c
@@ -235,7 +235,7 @@ static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
return 0;
}
-static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
+static int dma_heap_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
struct heap_helper_buffer *buffer = dmabuf->priv;
void *vaddr;
@@ -244,10 +244,14 @@ static void *dma_heap_dma_buf_vmap(struct dma_buf *dmabuf)
vaddr = dma_heap_buffer_vmap_get(buffer);
mutex_unlock(&buffer->lock);
- return vaddr;
+ if (!vaddr)
+ return -ENOMEM;
+ dma_buf_map_set_vaddr(map, vaddr);
+
+ return 0;
}
-static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
struct heap_helper_buffer *buffer = dmabuf->priv;
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 147d61b9674e..32257189e09b 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -287,6 +287,7 @@ config DRM_VKMS
tristate "Virtual KMS (EXPERIMENTAL)"
depends on DRM
select DRM_KMS_HELPER
+ select DRM_GEM_SHMEM_HELPER
select CRC32
default n
help
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 5da487b64a66..054a1c2d5054 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1479,7 +1479,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
}
}
- if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->pin_count)
+ if (!amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) && !bo->tbo.pin_count)
amdgpu_bo_fence(bo,
&avm->process_info->eviction_fence->base,
true);
@@ -1558,7 +1558,8 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
* required.
*/
if (mem->mapped_to_gpu_memory == 0 &&
- !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) && !mem->bo->pin_count)
+ !amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm) &&
+ !mem->bo->tbo.pin_count)
amdgpu_amdkfd_remove_eviction_fence(mem->bo,
process_info->eviction_fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 12598a4b5c78..d50b63a93d37 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -410,7 +410,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
uint32_t domain;
int r;
- if (bo->pin_count)
+ if (bo->tbo.pin_count)
return 0;
/* Don't move this buffer if we have depleted our allowance
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 2d125b8b15ee..065937482239 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1319,6 +1319,7 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
+ struct ttm_resource_manager *man;
int r;
r = pm_runtime_get_sync(dev->dev);
@@ -1327,7 +1328,9 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data)
return r;
}
- seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT));
+ man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
+ r = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+ seq_printf(m, "(%d)\n", r);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 7cc7af2a6822..b25faaee6f0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -132,10 +132,7 @@ static void amdgpu_display_unpin_work_func(struct work_struct *__work)
/* unpin of the old buffer */
r = amdgpu_bo_reserve(work->old_abo, true);
if (likely(r == 0)) {
- r = amdgpu_bo_unpin(work->old_abo);
- if (unlikely(r != 0)) {
- DRM_ERROR("failed to unpin buffer after flip\n");
- }
+ amdgpu_bo_unpin(work->old_abo);
amdgpu_bo_unreserve(work->old_abo);
} else
DRM_ERROR("failed to reserve buffer after flip\n");
@@ -249,8 +246,7 @@ pflip_cleanup:
}
unpin:
if (!adev->enable_virtual_display)
- if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
- DRM_ERROR("failed to unpin new abo in error path\n");
+ amdgpu_bo_unpin(new_abo);
unreserve:
amdgpu_bo_unreserve(new_abo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index 957934926b24..5b465ab774d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -281,7 +281,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
struct sg_table *sgt;
long r;
- if (!bo->pin_count) {
+ if (!bo->tbo.pin_count) {
/* move buffer into GTT or VRAM */
struct ttm_operation_ctx ctx = { false, false };
unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
@@ -390,7 +390,8 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
if (unlikely(ret != 0))
return ret;
- if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
+ if (!bo->tbo.pin_count &&
+ (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 42d9748921f5..8b30915aa972 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1520,19 +1520,13 @@ static struct drm_driver kms_driver = {
.lastclose = amdgpu_driver_lastclose_kms,
.irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms,
- .gem_free_object_unlocked = amdgpu_gem_object_free,
- .gem_open_object = amdgpu_gem_object_open,
- .gem_close_object = amdgpu_gem_object_close,
.dumb_create = amdgpu_mode_dumb_create,
.dumb_map_offset = amdgpu_mode_dumb_mmap,
.fops = &amdgpu_driver_kms_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = amdgpu_gem_prime_export,
.gem_prime_import = amdgpu_gem_prime_import,
- .gem_prime_vmap = amdgpu_gem_prime_vmap,
- .gem_prime_vunmap = amdgpu_gem_prime_vunmap,
.gem_prime_mmap = amdgpu_gem_prime_mmap,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7e8265da9f25..8ea6fc745769 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,9 +36,12 @@
#include "amdgpu.h"
#include "amdgpu_display.h"
+#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
-void amdgpu_gem_object_free(struct drm_gem_object *gobj)
+static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
+
+static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
@@ -87,6 +90,7 @@ retry:
return r;
}
*obj = &bo->tbo.base;
+ (*obj)->funcs = &amdgpu_gem_object_funcs;
return 0;
}
@@ -119,8 +123,8 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
* Call from drm_gem_handle_create which appear in both new and open ioctl
* case.
*/
-int amdgpu_gem_object_open(struct drm_gem_object *obj,
- struct drm_file *file_priv)
+static int amdgpu_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
{
struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
@@ -152,8 +156,8 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
return 0;
}
-void amdgpu_gem_object_close(struct drm_gem_object *obj,
- struct drm_file *file_priv)
+static void amdgpu_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -211,6 +215,15 @@ out_unlock:
ttm_eu_backoff_reservation(&ticket, &list);
}
+static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
+ .free = amdgpu_gem_object_free,
+ .open = amdgpu_gem_object_open,
+ .close = amdgpu_gem_object_close,
+ .export = amdgpu_gem_prime_export,
+ .vmap = amdgpu_gem_prime_vmap,
+ .vunmap = amdgpu_gem_prime_vunmap,
+};
+
/*
* GEM ioctls.
*/
@@ -870,7 +883,7 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
seq_printf(m, "\t0x%08x: %12ld byte %s",
id, amdgpu_bo_size(bo), placement);
- pin_count = READ_ONCE(bo->pin_count);
+ pin_count = READ_ONCE(bo->tbo.pin_count);
if (pin_count)
seq_printf(m, " pin count %d", pin_count);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
index e0f025dd1b14..637bf51dbf06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h
@@ -33,11 +33,6 @@
#define AMDGPU_GEM_DOMAIN_MAX 0x3
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)
-void amdgpu_gem_object_free(struct drm_gem_object *obj);
-int amdgpu_gem_object_open(struct drm_gem_object *obj,
- struct drm_file *file_priv);
-void amdgpu_gem_object_close(struct drm_gem_object *obj,
- struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 36604d751d62..cc86f431a3d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -45,12 +45,10 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
uint64_t *addr, uint64_t *flags)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_dma_tt *ttm;
switch (bo->tbo.mem.mem_type) {
case TTM_PL_TT:
- ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
- *addr = ttm->dma_address[0];
+ *addr = bo->tbo.ttm->dma_address[0];
break;
case TTM_PL_VRAM:
*addr = amdgpu_bo_gpu_offset(bo);
@@ -122,16 +120,14 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
- struct ttm_dma_tt *ttm;
- if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached)
+ if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
return AMDGPU_BO_INVALID_OFFSET;
- ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);
- if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
+ if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
return AMDGPU_BO_INVALID_OFFSET;
- return adev->gmc.agp_start + ttm->dma_address[0];
+ return adev->gmc.agp_start + bo->ttm->dma_address[0];
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index f203e4a6a3f2..1721739def84 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -136,7 +136,7 @@ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev)
ttm_resource_manager_set_used(man, false);
- ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
+ ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
if (ret)
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index ac043baac05d..1aa516429c80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -78,7 +78,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
- if (bo->pin_count > 0)
+ if (bo->tbo.pin_count > 0)
amdgpu_bo_subtract_pin_size(bo);
amdgpu_bo_kunmap(bo);
@@ -137,7 +137,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = TTM_PL_VRAM;
- places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
+ places[c].flags = 0;
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = visible_pfn;
@@ -154,11 +154,6 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].lpfn = 0;
places[c].mem_type = TTM_PL_TT;
places[c].flags = 0;
- if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
- places[c].flags |= TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED;
- else
- places[c].flags |= TTM_PL_FLAG_CACHED;
c++;
}
@@ -167,11 +162,6 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].lpfn = 0;
places[c].mem_type = TTM_PL_SYSTEM;
places[c].flags = 0;
- if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
- places[c].flags |= TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED;
- else
- places[c].flags |= TTM_PL_FLAG_CACHED;
c++;
}
@@ -179,7 +169,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = AMDGPU_PL_GDS;
- places[c].flags = TTM_PL_FLAG_UNCACHED;
+ places[c].flags = 0;
c++;
}
@@ -187,7 +177,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = AMDGPU_PL_GWS;
- places[c].flags = TTM_PL_FLAG_UNCACHED;
+ places[c].flags = 0;
c++;
}
@@ -195,7 +185,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = AMDGPU_PL_OA;
- places[c].flags = TTM_PL_FLAG_UNCACHED;
+ places[c].flags = 0;
c++;
}
@@ -203,7 +193,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
places[c].fpfn = 0;
places[c].lpfn = 0;
places[c].mem_type = TTM_PL_SYSTEM;
- places[c].flags = TTM_PL_MASK_CACHING;
+ places[c].flags = 0;
c++;
}
@@ -721,7 +711,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
uint32_t domain;
int r;
- if (bo->pin_count)
+ if (bo->tbo.pin_count)
return 0;
domain = bo->preferred_domains;
@@ -918,13 +908,13 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
*/
domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
- if (bo->pin_count) {
+ if (bo->tbo.pin_count) {
uint32_t mem_type = bo->tbo.mem.mem_type;
if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
return -EINVAL;
- bo->pin_count++;
+ ttm_bo_pin(&bo->tbo);
if (max_offset != 0) {
u64 domain_start = amdgpu_ttm_domain_start(adev,
@@ -955,7 +945,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (!bo->placements[i].lpfn ||
(lpfn && lpfn < bo->placements[i].lpfn))
bo->placements[i].lpfn = lpfn;
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
@@ -964,7 +953,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
goto error;
}
- bo->pin_count = 1;
+ ttm_bo_pin(&bo->tbo);
domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -1006,34 +995,16 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
* Returns:
* 0 for success or a negative error code on failure.
*/
-int amdgpu_bo_unpin(struct amdgpu_bo *bo)
+void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
- struct ttm_operation_ctx ctx = { false, false };
- int r, i;
-
- if (WARN_ON_ONCE(!bo->pin_count)) {
- dev_warn(adev->dev, "%p unpin not necessary\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
+ ttm_bo_unpin(&bo->tbo);
+ if (bo->tbo.pin_count)
+ return;
amdgpu_bo_subtract_pin_size(bo);
if (bo->tbo.base.import_attach)
dma_buf_unpin(bo->tbo.base.import_attach);
-
- for (i = 0; i < bo->placement.num_placement; i++) {
- bo->placements[i].lpfn = 0;
- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- }
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (unlikely(r))
- dev_err(adev->dev, "%p validate failed for unpin\n", bo);
-
- return r;
}
/**
@@ -1048,6 +1019,8 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
*/
int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
+ struct ttm_resource_manager *man;
+
/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
#ifndef CONFIG_HIBERNATION
if (adev->flags & AMD_IS_APU) {
@@ -1055,7 +1028,9 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
return 0;
}
#endif
- return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
+
+ man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+ return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
}
static const char *amdgpu_vram_names[] = {
@@ -1360,19 +1335,14 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
* Returns:
* 0 for success or a negative error code on failure.
*/
-int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
- struct amdgpu_bo *abo;
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
unsigned long offset, size;
int r;
- if (!amdgpu_bo_is_amdgpu_bo(bo))
- return 0;
-
- abo = ttm_to_amdgpu_bo(bo);
-
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
@@ -1385,8 +1355,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return 0;
/* Can't move a pinned BO to visible VRAM */
- if (abo->pin_count > 0)
- return -EINVAL;
+ if (abo->tbo.pin_count > 0)
+ return VM_FAULT_SIGBUS;
/* hurrah the memory is not visible ! */
atomic64_inc(&adev->num_vram_cpu_page_faults);
@@ -1398,15 +1368,18 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
abo->placement.busy_placement = &abo->placements[1];
r = ttm_bo_validate(bo, &abo->placement, &ctx);
- if (unlikely(r != 0))
- return r;
+ if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
+ return VM_FAULT_NOPAGE;
+ else if (unlikely(r))
+ return VM_FAULT_SIGBUS;
offset = bo->mem.start << PAGE_SHIFT;
/* this should never happen */
if (bo->mem.mem_type == TTM_PL_VRAM &&
(offset + size) > adev->gmc.visible_vram_size)
- return -EINVAL;
+ return VM_FAULT_SIGBUS;
+ ttm_bo_move_to_lru_tail_unlocked(bo);
return 0;
}
@@ -1489,7 +1462,7 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
- !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
+ !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
!(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
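The hunks above are the recurring TTM pinning conversion of this merge: the driver-private pin_count and the TTM_PL_FLAG_NO_EVICT placement flag are replaced by the reference-counted pin in the ttm_buffer_object itself. A minimal sketch of the new pattern, with a hypothetical foo_ driver prefix:

    #include <drm/ttm/ttm_bo_api.h>

    struct foo_bo {
            struct ttm_buffer_object tbo;
    };

    static int foo_bo_pin(struct foo_bo *bo)
    {
            if (bo->tbo.pin_count) {
                    /* already validated and pinned: just take another reference */
                    ttm_bo_pin(&bo->tbo);
                    return 0;
            }
            /* ... ttm_bo_validate() into the desired placement ... */
            ttm_bo_pin(&bo->tbo);
            return 0;
    }

    static void foo_bo_unpin(struct foo_bo *bo)
    {
            /* drops tbo.pin_count; no ttm_bo_validate() round trip needed */
            ttm_bo_unpin(&bo->tbo);
    }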
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 5ddb6cf96030..132e5f955180 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -89,7 +89,6 @@ struct amdgpu_bo {
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
u64 flags;
- unsigned pin_count;
u64 tiling_flags;
u64 metadata_flags;
void *metadata;
@@ -267,7 +266,7 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 min_offset, u64 max_offset);
-int amdgpu_bo_unpin(struct amdgpu_bo *bo);
+void amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
int amdgpu_bo_late_init(struct amdgpu_device *adev);
@@ -285,7 +284,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
-int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8039d2399584..ddb1c8e9eea4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -66,6 +66,8 @@
static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem);
+static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm);
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
unsigned int type,
@@ -92,7 +94,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
- .flags = TTM_PL_MASK_CACHING
+ .flags = 0
};
/* Don't handle scatter gather BOs */
@@ -292,11 +294,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
cpu_addr = &job->ibs[0].ptr[num_dw];
if (mem->mem_type == TTM_PL_TT) {
- struct ttm_dma_tt *dma;
dma_addr_t *dma_address;
- dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
- dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+ dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
cpu_addr);
if (r)
@@ -538,19 +538,13 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
placements.fpfn = 0;
placements.lpfn = 0;
placements.mem_type = TTM_PL_TT;
- placements.flags = TTM_PL_MASK_CACHING;
+ placements.flags = 0;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
pr_err("Failed to find GTT space for blit from VRAM\n");
return r;
}
- /* set caching flags */
- r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
- if (unlikely(r)) {
- goto out_cleanup;
- }
-
r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
if (unlikely(r))
goto out_cleanup;
@@ -567,8 +561,13 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
goto out_cleanup;
}
- /* move BO (in tmp_mem) to new_mem */
- r = ttm_bo_move_ttm(bo, ctx, new_mem);
+ r = ttm_bo_wait_ctx(bo, ctx);
+ if (unlikely(r))
+ goto out_cleanup;
+
+ amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+ ttm_resource_free(bo, &bo->mem);
+ ttm_bo_assign_mem(bo, new_mem);
out_cleanup:
ttm_resource_free(bo, &tmp_mem);
return r;
@@ -599,7 +598,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
placements.fpfn = 0;
placements.lpfn = 0;
placements.mem_type = TTM_PL_TT;
- placements.flags = TTM_PL_MASK_CACHING;
+ placements.flags = 0;
r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
pr_err("Failed to find GTT space for blit to VRAM\n");
@@ -607,11 +606,16 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
}
/* move/bind old memory to GTT space */
- r = ttm_bo_move_ttm(bo, ctx, &tmp_mem);
+ r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+ if (unlikely(r))
+ return r;
+
+ r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
+ ttm_bo_assign_mem(bo, &tmp_mem);
/* copy to VRAM */
r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
if (unlikely(r)) {
@@ -660,9 +664,17 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *old_mem = &bo->mem;
int r;
+ if (new_mem->mem_type == TTM_PL_TT) {
+ r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
+ if (r)
+ return r;
+ }
+
+ amdgpu_bo_move_notify(bo, evict, new_mem);
+
/* Can't move a pinned BO */
abo = ttm_to_amdgpu_bo(bo);
- if (WARN_ON_ONCE(abo->pin_count > 0))
+ if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
return -EINVAL;
adev = amdgpu_ttm_adev(bo->bdev);
@@ -671,14 +683,24 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
ttm_bo_move_null(bo, new_mem);
return 0;
}
- if ((old_mem->mem_type == TTM_PL_TT &&
- new_mem->mem_type == TTM_PL_SYSTEM) ||
- (old_mem->mem_type == TTM_PL_SYSTEM &&
- new_mem->mem_type == TTM_PL_TT)) {
- /* bind is enough */
+ if (old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_TT) {
ttm_bo_move_null(bo, new_mem);
return 0;
}
+
+ if (old_mem->mem_type == TTM_PL_TT &&
+ new_mem->mem_type == TTM_PL_SYSTEM) {
+ r = ttm_bo_wait_ctx(bo, ctx);
+ if (r)
+ goto fail;
+
+ amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+ ttm_resource_free(bo, &bo->mem);
+ ttm_bo_assign_mem(bo, new_mem);
+ return 0;
+ }
+
if (old_mem->mem_type == AMDGPU_PL_GDS ||
old_mem->mem_type == AMDGPU_PL_GWS ||
old_mem->mem_type == AMDGPU_PL_OA ||
@@ -712,12 +734,12 @@ memcpy:
if (!amdgpu_mem_visible(adev, old_mem) ||
!amdgpu_mem_visible(adev, new_mem)) {
pr_err("Move buffer fallback to memcpy unavailable\n");
- return r;
+ goto fail;
}
r = ttm_bo_move_memcpy(bo, ctx, new_mem);
if (r)
- return r;
+ goto fail;
}
if (bo->type == ttm_bo_type_device &&
@@ -732,6 +754,11 @@ memcpy:
/* update statistics */
atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
return 0;
+fail:
+ swap(*new_mem, bo->mem);
+ amdgpu_bo_move_notify(bo, false, new_mem);
+ swap(*new_mem, bo->mem);
+ return r;
}
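A note on the new fail: path: at that point bo->mem still holds the old placement, so the first swap() leaves the previous placement in *new_mem, amdgpu_bo_move_notify() then announces a move back to it (undoing the notification issued before the move was attempted), and the second swap() restores both structures before the error is returned.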
/**
@@ -767,6 +794,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
mem->bus.offset += adev->gmc.aper_base;
mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_write_combined;
break;
default:
return -EINVAL;
@@ -811,7 +839,7 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
* TTM backend functions.
*/
struct amdgpu_ttm_tt {
- struct ttm_dma_tt ttm;
+ struct ttm_tt ttm;
struct drm_gem_object *gobj;
u64 offset;
uint64_t userptr;
@@ -943,7 +971,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
if (!gtt || !gtt->userptr)
return false;
- DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
+ DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
gtt->userptr, ttm->num_pages);
WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
@@ -1095,7 +1123,7 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
gart_bind_fail:
if (r)
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
ttm->num_pages, gtt->offset);
return r;
@@ -1130,7 +1158,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
}
}
if (!ttm->num_pages) {
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
@@ -1153,7 +1181,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
ttm->pages, gtt->ttm.dma_address, flags);
if (r)
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
ttm->num_pages, gtt->offset);
gtt->bound = true;
return r;
@@ -1267,8 +1295,8 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
if (r)
- DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
- gtt->ttm.ttm.num_pages, gtt->offset);
+ DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
+ gtt->ttm.num_pages, gtt->offset);
gtt->bound = false;
}
@@ -1282,7 +1310,7 @@ static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
if (gtt->usertask)
put_task_struct(gtt->usertask);
- ttm_dma_tt_fini(&gtt->ttm);
+ ttm_tt_fini(&gtt->ttm);
kfree(gtt);
}
@@ -1296,7 +1324,9 @@ static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_ttm_tt *gtt;
+ enum ttm_caching caching;
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL) {
@@ -1304,12 +1334,17 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
}
gtt->gobj = &bo->base;
+ if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC)
+ caching = ttm_write_combined;
+ else
+ caching = ttm_cached;
+
/* allocate space for the uninitialized page entries */
- if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags)) {
+ if (ttm_sg_tt_init(&gtt->ttm, bo, page_flags, caching)) {
kfree(gtt);
return NULL;
}
- return &gtt->ttm.ttm;
+ return &gtt->ttm;
}
/**
@@ -1332,7 +1367,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
return -ENOMEM;
ttm->page_flags |= TTM_PAGE_FLAG_SG;
- ttm_tt_set_populated(ttm);
return 0;
}
@@ -1352,7 +1386,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address,
ttm->num_pages);
- ttm_tt_set_populated(ttm);
return 0;
}
@@ -1478,7 +1511,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
/* Return false if no part of the ttm_tt object lies within
* the range
*/
- size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+ size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
if (gtt->userptr > end || gtt->userptr + size <= start)
return false;
@@ -1529,7 +1562,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
if (mem && mem->mem_type == TTM_PL_TT) {
flags |= AMDGPU_PTE_SYSTEM;
- if (ttm->caching_state == tt_cached)
+ if (ttm->caching == ttm_cached)
flags |= AMDGPU_PTE_SNOOPED;
}
@@ -1699,20 +1732,23 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
return ret;
}
+static void
+amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+ amdgpu_bo_move_notify(bo, false, NULL);
+}
+
static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
- .ttm_tt_bind = &amdgpu_ttm_backend_bind,
- .ttm_tt_unbind = &amdgpu_ttm_backend_unbind,
.ttm_tt_destroy = &amdgpu_ttm_backend_destroy,
.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
.verify_access = &amdgpu_verify_access,
- .move_notify = &amdgpu_bo_move_notify,
+ .delete_mem_notify = &amdgpu_bo_delete_mem_notify,
.release_notify = &amdgpu_bo_release_notify,
- .fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_pfn = amdgpu_ttm_io_mem_pfn,
.access_memory = &amdgpu_ttm_access_memory,
@@ -2092,15 +2128,48 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
adev->mman.buffer_funcs_enabled = enable;
}
+static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
+{
+ struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+ vm_fault_t ret;
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ return ret;
+
+ ret = amdgpu_bo_fault_reserve_notify(bo);
+ if (ret)
+ goto unlock;
+
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT, 1);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ return ret;
+
+unlock:
+ dma_resv_unlock(bo->base.resv);
+ return ret;
+}
+
+static struct vm_operations_struct amdgpu_ttm_vm_ops = {
+ .fault = amdgpu_ttm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+ .access = ttm_bo_vm_access
+};
+
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv = filp->private_data;
struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
+ int r;
- if (adev == NULL)
- return -EINVAL;
+ r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+ if (unlikely(r != 0))
+ return r;
- return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
+ vma->vm_ops = &amdgpu_ttm_vm_ops;
+ return 0;
}
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
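This is also where the removed fault_reserve_notify() callback ends up: the driver now owns its vm_operations and brackets its notification with TTM's split fault helpers, exactly as amdgpu_ttm_fault() above does. The shape of the pattern, sketched with a hypothetical foo_ prefix:

    static vm_fault_t foo_ttm_fault(struct vm_fault *vmf)
    {
            struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
            vm_fault_t ret;

            ret = ttm_bo_vm_reserve(bo, vmf);  /* takes the BO's dma_resv lock */
            if (ret)
                    return ret;

            /* driver-specific work, e.g. migrating to CPU-visible memory */

            ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
                                           TTM_BO_VM_NUM_PREFAULT, 1);
            if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                    return ret;  /* TTM already dropped the lock for the retry */

            dma_resv_unlock(bo->base.resv);
            return ret;
    }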
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index df110afa97bf..38b59a4fc04c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -609,7 +609,7 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
+ if (bo->pin_count)
return;
abo = ttm_to_amdgpu_bo(bo);
@@ -1790,7 +1790,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
resv = vm->root.base.bo->tbo.base.resv;
} else {
struct drm_gem_object *obj = &bo->tbo.base;
- struct ttm_dma_tt *ttm;
resv = bo->tbo.base.resv;
if (obj->import_attach && bo_va->is_xgmi) {
@@ -1803,10 +1802,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
}
mem = &bo->tbo.mem;
nodes = mem->mm_node;
- if (mem->mem_type == TTM_PL_TT) {
- ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
- pages_addr = ttm->dma_address;
- }
+ if (mem->mem_type == TTM_PL_TT)
+ pages_addr = bo->tbo.ttm->dma_address;
}
if (bo) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 01c1171afbe0..7747be644dd0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -212,7 +212,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
ttm_resource_manager_set_used(man, false);
- ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man);
+ ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
if (ret)
return;
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index be7c29cec318..042d7b54a6de 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -116,7 +116,7 @@ static void arc_pgu_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
@@ -127,7 +127,7 @@ static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index f33418d6e1a0..a4bbf56a7fc1 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -273,8 +273,10 @@ komeda_crtc_do_flush(struct drm_crtc *crtc,
static void
komeda_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state,
+ crtc);
pm_runtime_get_sync(crtc->dev->dev);
komeda_crtc_prepare(to_kcrtc(crtc));
drm_crtc_vblank_on(crtc);
@@ -319,8 +321,10 @@ komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
static void
komeda_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct komeda_crtc *kcrtc = to_kcrtc(crtc);
struct komeda_crtc_state *old_st = to_kcrtc_st(old);
struct komeda_pipeline *master = kcrtc->master;
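The komeda hunks show the tree-wide CRTC hook conversion from this series: atomic_enable()/atomic_disable() now receive the full drm_atomic_state rather than the old CRTC state, and drivers that still need the old state look it up explicitly. The minimal form of the new callback:

    static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
                                       struct drm_atomic_state *state)
    {
            struct drm_crtc_state *old_state =
                    drm_atomic_get_old_crtc_state(state, crtc);

            /* program the hardware; crtc->state holds the new state */
    }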
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 1d767473ba8a..1f8195bad536 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -41,18 +41,7 @@ static int komeda_register_show(struct seq_file *sf, void *x)
return 0;
}
-static int komeda_register_open(struct inode *inode, struct file *filp)
-{
- return single_open(filp, komeda_register_show, inode->i_private);
-}
-
-static const struct file_operations komeda_register_fops = {
- .owner = THIS_MODULE,
- .open = komeda_register_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(komeda_register);
#ifdef CONFIG_DEBUG_FS
static void komeda_debugfs_init(struct komeda_dev *mdev)
@@ -261,8 +250,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
goto disable_clk;
}
- dev->dma_parms = &mdev->dma_parms;
- dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+ dma_set_max_seg_size(dev, U32_MAX);
mdev->iommu = iommu_get_domain_for_dev(mdev->dev);
if (!mdev->iommu)
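DEFINE_SHOW_ATTRIBUTE(komeda_register) generates essentially the boilerplate deleted above: an open helper routed through single_open() plus a file_operations instance named komeda_register_fops. Roughly, the macro expands to:

    static int komeda_register_open(struct inode *inode, struct file *file)
    {
            return single_open(file, komeda_register_show, inode->i_private);
    }

    static const struct file_operations komeda_register_fops = {
            .owner   = THIS_MODULE,
            .open    = komeda_register_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };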
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
index ce27f2f27c24..5b536f0cb548 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
@@ -163,8 +163,6 @@ struct komeda_dev {
struct device *dev;
/** @reg_base: the base address of komeda io space */
u32 __iomem *reg_base;
- /** @dma_parms: the dma parameters of komeda */
- struct device_dma_parameters dma_parms;
/** @chip: the basic chip information */
struct komeda_chip_info chip;
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index af67fefed38d..84ac10d59485 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -168,7 +168,7 @@ static void hdlcd_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static void hdlcd_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
@@ -179,7 +179,7 @@ static void hdlcd_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void hdlcd_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index 587d94798f5c..49766eb7a554 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -46,7 +46,7 @@ static enum drm_mode_status malidp_crtc_mode_valid(struct drm_crtc *crtc,
}
static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
@@ -70,8 +70,10 @@ static void malidp_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void malidp_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
int err;
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index a887b6a5f8bd..e0fbfc9ce386 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -467,8 +467,10 @@ static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc,
}
static void armada_drm_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct drm_pending_vblank_event *event;
@@ -503,8 +505,10 @@ static void armada_drm_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void armada_drm_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 980d3f1f8f16..22247cfce80b 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -37,13 +37,10 @@ DEFINE_DRM_GEM_FOPS(armada_drm_fops);
static struct drm_driver armada_drm_driver = {
.lastclose = drm_fb_helper_lastclose,
- .gem_free_object_unlocked = armada_gem_free_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = armada_gem_prime_export,
.gem_prime_import = armada_gem_prime_import,
.dumb_create = armada_gem_dumb_create,
- .gem_vm_ops = &armada_gem_vm_ops,
.major = 1,
.minor = 0,
.name = "armada-drm",
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 6654bccd9466..21909642ee4c 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -25,7 +25,7 @@ static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
-const struct vm_operations_struct armada_gem_vm_ops = {
+static const struct vm_operations_struct armada_gem_vm_ops = {
.fault = armada_gem_vm_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
@@ -184,6 +184,12 @@ armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
return dobj->addr;
}
+static const struct drm_gem_object_funcs armada_gem_object_funcs = {
+ .free = armada_gem_free_object,
+ .export = armada_gem_prime_export,
+ .vm_ops = &armada_gem_vm_ops,
+};
+
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
@@ -195,6 +201,8 @@ armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
if (!obj)
return NULL;
+ obj->obj.funcs = &armada_gem_object_funcs;
+
drm_gem_private_object_init(dev, &obj->obj, size);
DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);
@@ -214,6 +222,8 @@ static struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
if (!obj)
return NULL;
+ obj->obj.funcs = &armada_gem_object_funcs;
+
if (drm_gem_object_init(dev, &obj->obj, size)) {
kfree(obj);
return NULL;
diff --git a/drivers/gpu/drm/armada/armada_gem.h b/drivers/gpu/drm/armada/armada_gem.h
index de04cc2c8f0e..ffcc7e8dd351 100644
--- a/drivers/gpu/drm/armada/armada_gem.h
+++ b/drivers/gpu/drm/armada/armada_gem.h
@@ -21,8 +21,6 @@ struct armada_gem_object {
void *update_data;
};
-extern const struct vm_operations_struct armada_gem_vm_ops;
-
#define drm_to_armada_gem(o) container_of(o, struct armada_gem_object, obj)
void armada_gem_free_object(struct drm_gem_object *);
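The armada changes are the same GEM conversion applied throughout this merge: free, export and vm_ops move out of struct drm_driver into a per-object struct drm_gem_object_funcs that is assigned before the object is initialized. Reduced to its core (the foo_ names are illustrative):

    static const struct drm_gem_object_funcs foo_gem_object_funcs = {
            .free   = foo_gem_free_object,
            .export = foo_gem_prime_export,
            .vm_ops = &foo_gem_vm_ops,
    };

    /* at allocation time, before drm_gem_object_init(): */
    obj->funcs = &foo_gem_object_funcs;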
diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig
index 018383cfcfa7..5e95bcea43e9 100644
--- a/drivers/gpu/drm/aspeed/Kconfig
+++ b/drivers/gpu/drm/aspeed/Kconfig
@@ -3,6 +3,7 @@ config DRM_ASPEED_GFX
tristate "ASPEED BMC Display Controller"
depends on DRM && OF
depends on (COMPILE_TEST || ARCH_ASPEED)
+ depends on MMU
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DMA_CMA if HAVE_DMA_CONTIGUOUS
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx.h b/drivers/gpu/drm/aspeed/aspeed_gfx.h
index e7ca95827ae8..f1e7e56abc02 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx.h
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx.h
@@ -75,7 +75,7 @@ int aspeed_gfx_create_output(struct drm_device *drm);
/* CTRL2 */
#define CRT_CTRL_DAC_EN BIT(0)
#define CRT_CTRL_VBLANK_LINE(x) (((x) << 20) & CRT_CTRL_VBLANK_LINE_MASK)
-#define CRT_CTRL_VBLANK_LINE_MASK GENMASK(20, 31)
+#define CRT_CTRL_VBLANK_LINE_MASK GENMASK(31, 20)
/* CRT_HORIZ0 */
#define CRT_H_TOTAL(x) (x)
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 2b424b2b85cc..771ad71cd340 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -193,12 +193,7 @@ DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver aspeed_gfx_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- .gem_create_object = drm_gem_cma_create_object_default_funcs,
- .dumb_create = drm_gem_cma_dumb_create,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_mmap = drm_gem_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS,
.fops = &fops,
.name = "aspeed-gfx-drm",
.desc = "ASPEED GFX DRM",
@@ -212,6 +207,69 @@ static const struct of_device_id aspeed_gfx_match[] = {
{ }
};
+#define ASPEED_SCU_VGA0 0x50
+#define ASPEED_SCU_MISC_CTRL 0x2c
+
+static ssize_t dac_mux_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct aspeed_gfx *priv = dev_get_drvdata(dev);
+ u32 val;
+ int rc;
+
+ rc = kstrtou32(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ if (val > 3)
+ return -EINVAL;
+
+ rc = regmap_update_bits(priv->scu, ASPEED_SCU_MISC_CTRL, 0x30000, val << 16);
+ if (rc < 0)
+ return rc;
+
+ return count;
+}
+
+static ssize_t dac_mux_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct aspeed_gfx *priv = dev_get_drvdata(dev);
+ u32 reg;
+ int rc;
+
+ rc = regmap_read(priv->scu, ASPEED_SCU_MISC_CTRL, &reg);
+ if (rc)
+ return rc;
+
+ return sprintf(buf, "%u\n", (reg >> 16) & 0x3);
+}
+static DEVICE_ATTR_RW(dac_mux);
+
+static ssize_t
+vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct aspeed_gfx *priv = dev_get_drvdata(dev);
+ u32 reg;
+ int rc;
+
+ rc = regmap_read(priv->scu, ASPEED_SCU_VGA0, &reg);
+ if (rc)
+ return rc;
+
+ return sprintf(buf, "%u\n", reg & 1);
+}
+static DEVICE_ATTR_RO(vga_pw);
+
+static struct attribute *aspeed_sysfs_entries[] = {
+ &dev_attr_vga_pw.attr,
+ &dev_attr_dac_mux.attr,
+ NULL,
+};
+
+static struct attribute_group aspeed_sysfs_attr_group = {
+ .attrs = aspeed_sysfs_entries,
+};
+
static int aspeed_gfx_probe(struct platform_device *pdev)
{
struct aspeed_gfx *priv;
@@ -226,6 +284,12 @@ static int aspeed_gfx_probe(struct platform_device *pdev)
if (ret)
return ret;
+ dev_set_drvdata(&pdev->dev, priv);
+
+ ret = sysfs_create_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
+ if (ret)
+ return ret;
+
ret = drm_dev_register(&priv->drm, 0);
if (ret)
goto err_unload;
@@ -234,6 +298,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev)
return 0;
err_unload:
+ sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
aspeed_gfx_unload(&priv->drm);
return ret;
@@ -243,6 +308,7 @@ static int aspeed_gfx_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
+ sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
drm_dev_unregister(drm);
aspeed_gfx_unload(drm);
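The new aspeed files follow the stock sysfs device-attribute pattern: DEVICE_ATTR_RW(dac_mux) binds dac_mux_show()/dac_mux_store() to a "dac_mux" file, the attributes are collected into a group, and the group is created in probe and removed on both the error path and in remove. Condensed, with a hypothetical read-only attribute:

    static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
    {
            return sprintf(buf, "%u\n", 0);  /* report the current value */
    }
    static DEVICE_ATTR_RO(foo);

    static struct attribute *foo_entries[] = {
            &dev_attr_foo.attr,
            NULL,
    };
    static const struct attribute_group foo_attr_group = {
            .attrs = foo_entries,
    };

    /* probe:  sysfs_create_group(&pdev->dev.kobj, &foo_attr_group);
     * remove: sysfs_remove_group(&pdev->dev.kobj, &foo_attr_group);
     */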
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 834a156e3a75..bd03a8a67e3a 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -742,7 +742,6 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_SUSPEND:
if (ast->tx_chip_type == AST_TX_DP501)
ast_set_dp501_video_output(crtc->dev, 1);
- ast_crtc_load_lut(ast, crtc);
break;
case DRM_MODE_DPMS_OFF:
if (ast->tx_chip_type == AST_TX_DP501)
@@ -778,8 +777,23 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
}
static void
+ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state)
+{
+ struct ast_private *ast = to_ast_private(crtc->dev);
+ struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state);
+ struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
+
+ /*
+ * The gamma LUT has to be reloaded after changing the primary
+ * plane's color format.
+ */
+ if (old_ast_crtc_state->format != ast_crtc_state->format)
+ ast_crtc_load_lut(ast, crtc);
+}
+
+static void
ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
@@ -802,8 +816,10 @@ ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
static void
ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
@@ -830,6 +846,7 @@ ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.atomic_check = ast_crtc_helper_atomic_check,
+ .atomic_flush = ast_crtc_helper_atomic_flush,
.atomic_enable = ast_crtc_helper_atomic_enable,
.atomic_disable = ast_crtc_helper_atomic_disable,
};
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index ce246b96330b..2b3888df22f8 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -165,7 +165,7 @@ atmel_hlcdc_crtc_mode_valid(struct drm_crtc *c,
}
static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = c->dev;
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
@@ -200,7 +200,7 @@ static void atmel_hlcdc_crtc_atomic_disable(struct drm_crtc *c,
}
static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = c->dev;
struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index f101dd2819b5..45838bd08d37 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -55,9 +55,9 @@ static int adv7511_update_cts_n(struct adv7511 *adv7511)
return 0;
}
-int adv7511_hdmi_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *fmt,
- struct hdmi_codec_params *hparms)
+static int adv7511_hdmi_hw_params(struct device *dev, void *data,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
{
struct adv7511 *adv7511 = dev_get_drvdata(dev);
unsigned int audio_source, i2s_format = 0;
diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
index e1fa7d820373..024ea2a570e7 100644
--- a/drivers/gpu/drm/bridge/analogix/Kconfig
+++ b/drivers/gpu/drm/bridge/analogix/Kconfig
@@ -25,3 +25,12 @@ config DRM_ANALOGIX_ANX78XX
config DRM_ANALOGIX_DP
tristate
depends on DRM
+
+config DRM_ANALOGIX_ANX7625
+ tristate "Analogix Anx7625 MIPI to DP interface support"
+ depends on DRM
+ depends on OF
+ help
+ ANX7625 is an ultra-low power 4K mobile HD transmitter
+ designed for portable devices. It converts MIPI/DPI to
+ DisplayPort 1.3 4K.
diff --git a/drivers/gpu/drm/bridge/analogix/Makefile b/drivers/gpu/drm/bridge/analogix/Makefile
index 97669b374098..44da392bb9f9 100644
--- a/drivers/gpu/drm/bridge/analogix/Makefile
+++ b/drivers/gpu/drm/bridge/analogix/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
analogix_dp-objs := analogix_dp_core.o analogix_dp_reg.o analogix-i2c-dptx.o
obj-$(CONFIG_DRM_ANALOGIX_ANX6345) += analogix-anx6345.o
+obj-$(CONFIG_DRM_ANALOGIX_ANX7625) += anx7625.o
obj-$(CONFIG_DRM_ANALOGIX_ANX78XX) += analogix-anx78xx.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix_dp.o
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 914c569ab8c1..fafb4b492ea0 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -524,7 +524,7 @@ void analogix_dp_enable_sw_function(struct analogix_dp_device *dp)
writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
}
-int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp)
+static int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp)
{
int reg;
int retval = 0;
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
new file mode 100644
index 000000000000..65cc05982f82
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -0,0 +1,1850 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright(c) 2020, Analogix Semiconductor. All rights reserved.
+ *
+ */
+#include <linux/gcd.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include <linux/of_gpio.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/display_timing.h>
+
+#include "anx7625.h"
+
+/*
+ * There is a sync issue when accessing I2C registers between the AP (CPU)
+ * and the internal firmware (OCM). To avoid this race condition, the AP
+ * should access the reserved slave address before the slave address changes.
+ */
+static int i2c_access_workaround(struct anx7625_data *ctx,
+ struct i2c_client *client)
+{
+ u8 offset;
+ struct device *dev = &client->dev;
+ int ret;
+
+ if (client == ctx->last_client)
+ return 0;
+
+ ctx->last_client = client;
+
+ if (client == ctx->i2c.tcpc_client)
+ offset = RSVD_00_ADDR;
+ else if (client == ctx->i2c.tx_p0_client)
+ offset = RSVD_D1_ADDR;
+ else if (client == ctx->i2c.tx_p1_client)
+ offset = RSVD_60_ADDR;
+ else if (client == ctx->i2c.rx_p0_client)
+ offset = RSVD_39_ADDR;
+ else if (client == ctx->i2c.rx_p1_client)
+ offset = RSVD_7F_ADDR;
+ else
+ offset = RSVD_00_ADDR;
+
+ ret = i2c_smbus_write_byte_data(client, offset, 0x00);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev,
+ "fail to access i2c id=%x\n:%x",
+ client->addr, offset);
+
+ return ret;
+}
+
+static int anx7625_reg_read(struct anx7625_data *ctx,
+ struct i2c_client *client, u8 reg_addr)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ i2c_access_workaround(ctx, client);
+
+ ret = i2c_smbus_read_byte_data(client, reg_addr);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "read i2c fail id=%x:%x\n",
+ client->addr, reg_addr);
+
+ return ret;
+}
+
+static int anx7625_reg_block_read(struct anx7625_data *ctx,
+ struct i2c_client *client,
+ u8 reg_addr, u8 len, u8 *buf)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ i2c_access_workaround(ctx, client);
+
+ ret = i2c_smbus_read_i2c_block_data(client, reg_addr, len, buf);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "read i2c block fail id=%x:%x\n",
+ client->addr, reg_addr);
+
+ return ret;
+}
+
+static int anx7625_reg_write(struct anx7625_data *ctx,
+ struct i2c_client *client,
+ u8 reg_addr, u8 reg_val)
+{
+ int ret;
+ struct device *dev = &client->dev;
+
+ i2c_access_workaround(ctx, client);
+
+ ret = i2c_smbus_write_byte_data(client, reg_addr, reg_val);
+
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "fail to write i2c id=%x\n:%x",
+ client->addr, reg_addr);
+
+ return ret;
+}
+
+static int anx7625_write_or(struct anx7625_data *ctx,
+ struct i2c_client *client,
+ u8 offset, u8 mask)
+{
+ int val;
+
+ val = anx7625_reg_read(ctx, client, offset);
+ if (val < 0)
+ return val;
+
+ return anx7625_reg_write(ctx, client, offset, (val | (mask)));
+}
+
+static int anx7625_write_and(struct anx7625_data *ctx,
+ struct i2c_client *client,
+ u8 offset, u8 mask)
+{
+ int val;
+
+ val = anx7625_reg_read(ctx, client, offset);
+ if (val < 0)
+ return val;
+
+ return anx7625_reg_write(ctx, client, offset, (val & (mask)));
+}
+
+static int anx7625_write_and_or(struct anx7625_data *ctx,
+ struct i2c_client *client,
+ u8 offset, u8 and_mask, u8 or_mask)
+{
+ int val;
+
+ val = anx7625_reg_read(ctx, client, offset);
+ if (val < 0)
+ return val;
+
+ return anx7625_reg_write(ctx, client,
+ offset, (val & and_mask) | (or_mask));
+}
+
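These three helpers give read-modify-write access to a single register: write_or() sets bits, while write_and() clears them (the mask is ANDed in, so callers pass the complement). Usage as it appears later in this file:

    /* set the MIPI RX enable bit in AP_AV_STATUS */
    anx7625_write_or(ctx, ctx->i2c.rx_p0_client, AP_AV_STATUS, AP_MIPI_RX_EN);

    /* clear it again; the complement keeps every other bit intact */
    anx7625_write_and(ctx, ctx->i2c.rx_p0_client, AP_AV_STATUS,
                      (u8)~AP_MIPI_RX_EN);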
+static int anx7625_read_ctrl_status_p0(struct anx7625_data *ctx)
+{
+ return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_CTRL_STATUS);
+}
+
+static int wait_aux_op_finish(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ int val;
+ int ret;
+
+ ret = readx_poll_timeout(anx7625_read_ctrl_status_p0,
+ ctx, val,
+ (!(val & AP_AUX_CTRL_OP_EN) || (val < 0)),
+ 2000,
+ 2000 * 150);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "aux operation fail!\n");
+ return -EIO;
+ }
+
+ val = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_CTRL_STATUS);
+ if (val < 0 || (val & 0x0F)) {
+ DRM_DEV_ERROR(dev, "aux status %02x\n", val);
+ val = -EIO;
+ }
+
+ return val;
+}
+
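wait_aux_op_finish() leans on readx_poll_timeout() from <linux/iopoll.h>, which re-evaluates val = op(args) until the condition holds or the timeout expires. Here the AUX "operation enable" bit is polled in 2 ms steps for up to 300 ms:

    ret = readx_poll_timeout(anx7625_read_ctrl_status_p0, ctx, val,
                             !(val & AP_AUX_CTRL_OP_EN) || (val < 0),
                             2000, 2000 * 150);  /* sleep_us, timeout_us */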
+static int anx7625_video_mute_control(struct anx7625_data *ctx,
+ u8 status)
+{
+ int ret;
+
+ if (status) {
+ /* Set mute on flag */
+ ret = anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, AP_MIPI_MUTE);
+ /* Clear mipi RX en */
+ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, (u8)~AP_MIPI_RX_EN);
+ } else {
+ /* Mute off flag */
+ ret = anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, (u8)~AP_MIPI_MUTE);
+ /* Set MIPI RX EN */
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, AP_MIPI_RX_EN);
+ }
+
+ return ret;
+}
+
+static int anx7625_config_audio_input(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ int ret;
+
+ /* Channel num */
+ ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_6, I2S_CH_2 << 5);
+
+ /* FS */
+ ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_4,
+ 0xf0, AUDIO_FS_48K);
+ /* Word length */
+ ret |= anx7625_write_and_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_5,
+ 0xf0, AUDIO_W_LEN_24_24MAX);
+ /* I2S */
+ ret |= anx7625_write_or(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CHANNEL_STATUS_6, I2S_SLAVE_MODE);
+ ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client,
+ AUDIO_CONTROL_REGISTER, ~TDM_TIMING_MODE);
+ /* Audio change flag */
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, AP_AUDIO_CHG);
+
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "fail to config audio.\n");
+
+ return ret;
+}
+
+/* Reduction of fraction a/b */
+static void anx7625_reduction_of_a_fraction(unsigned long *a, unsigned long *b)
+{
+ unsigned long gcd_num;
+ unsigned long tmp_a, tmp_b;
+ u32 i = 1;
+
+ gcd_num = gcd(*a, *b);
+ *a /= gcd_num;
+ *b /= gcd_num;
+
+ tmp_a = *a;
+ tmp_b = *b;
+
+ while ((*a > MAX_UNSIGNED_24BIT) || (*b > MAX_UNSIGNED_24BIT)) {
+ i++;
+ *a = tmp_a / i;
+ *b = tmp_b / i;
+ }
+
+ /*
+ * In the end, make a, b larger to have higher ODFC PLL
+ * output frequency accuracy
+ */
+ while ((*a < MAX_UNSIGNED_24BIT) && (*b < MAX_UNSIGNED_24BIT)) {
+ *a <<= 1;
+ *b <<= 1;
+ }
+
+ *a >>= 1;
+ *b >>= 1;
+}
+
+static int anx7625_calculate_m_n(u32 pixelclock,
+ unsigned long *m,
+ unsigned long *n,
+ u8 *post_divider)
+{
+ if (pixelclock > PLL_OUT_FREQ_ABS_MAX / POST_DIVIDER_MIN) {
+ /* Pixel clock frequency is too high */
+ DRM_ERROR("pixelclock too high, act(%d), maximum(%lu)\n",
+ pixelclock,
+ PLL_OUT_FREQ_ABS_MAX / POST_DIVIDER_MIN);
+ return -EINVAL;
+ }
+
+ if (pixelclock < PLL_OUT_FREQ_ABS_MIN / POST_DIVIDER_MAX) {
+ /* Pixel clock frequency is too low */
+ DRM_ERROR("pixelclock too low, act(%d), maximum(%lu)\n",
+ pixelclock,
+ PLL_OUT_FREQ_ABS_MIN / POST_DIVIDER_MAX);
+ return -EINVAL;
+ }
+
+ for (*post_divider = 1;
+ pixelclock < (PLL_OUT_FREQ_MIN / (*post_divider));)
+ *post_divider += 1;
+
+ if (*post_divider > POST_DIVIDER_MAX) {
+ for (*post_divider = 1;
+ (pixelclock <
+ (PLL_OUT_FREQ_ABS_MIN / (*post_divider)));)
+ *post_divider += 1;
+
+ if (*post_divider > POST_DIVIDER_MAX) {
+ DRM_ERROR("cannot find property post_divider(%d)\n",
+ *post_divider);
+ return -EDOM;
+ }
+ }
+
+ /* Patch to improve the accuracy */
+ if (*post_divider == 7) {
+ /* 27,000,000 is not divisible by 7 */
+ *post_divider = 8;
+ } else if (*post_divider == 11) {
+ /* 27,000,000 is not divisible by 11 */
+ *post_divider = 12;
+ } else if ((*post_divider == 13) || (*post_divider == 14)) {
+ /* 27,000,000 is not divisible by 13 or 14 */
+ *post_divider = 15;
+ }
+
+ if (pixelclock * (*post_divider) > PLL_OUT_FREQ_ABS_MAX) {
+ DRM_ERROR("act clock(%u) large than maximum(%lu)\n",
+ pixelclock * (*post_divider),
+ PLL_OUT_FREQ_ABS_MAX);
+ return -EDOM;
+ }
+
+ *m = pixelclock;
+ *n = XTAL_FRQ / (*post_divider);
+
+ anx7625_reduction_of_a_fraction(m, n);
+
+ return 0;
+}
+
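anx7625_calculate_m_n() picks the smallest post divider d that keeps the PLL output f_pixel * d inside the working range, derives the feedback ratio M/N = f_pixel / (XTAL_FRQ / d), reduces it by the GCD, and scales it back up toward 24 bits for accuracy. A worked example, assuming the 27 MHz crystal and a 148.5 MHz pixel clock for which the loop settles on d = 8:

    M/N = 148,500,000 / (27,000,000 / 8)
        = 148,500,000 / 3,375,000
        = 44 / 1

    scaled up while both values fit in 24 bits:
    M = 44 << 18 = 11,534,336
    N =  1 << 18 =    262,144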
+static int anx7625_odfc_config(struct anx7625_data *ctx,
+ u8 post_divider)
+{
+ int ret;
+ struct device *dev = &ctx->client->dev;
+
+ /* Config input reference clock frequency 27MHz/19.2MHz */
+ ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_16,
+ ~(REF_CLK_27000KHZ << MIPI_FREF_D_IND));
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_16,
+ (REF_CLK_27000KHZ << MIPI_FREF_D_IND));
+ /* Post divider */
+ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client,
+ MIPI_DIGITAL_PLL_8, 0x0f);
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_8,
+ post_divider << 4);
+
+ /* Add patch for MIS2-125 (5pcs ANX7625 fail ATE MBIST test) */
+ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_7,
+ ~MIPI_PLL_VCO_TUNE_REG_VAL);
+
+ /* Reset ODFC PLL */
+ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_7,
+ ~MIPI_PLL_RESET_N);
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_7,
+ MIPI_PLL_RESET_N);
+
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "IO error.\n");
+
+ return ret;
+}
+
+static int anx7625_dsi_video_timing_config(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ unsigned long m, n;
+ u16 htotal;
+ int ret;
+ u8 post_divider = 0;
+
+ ret = anx7625_calculate_m_n(ctx->dt.pixelclock.min * 1000,
+ &m, &n, &post_divider);
+
+ if (ret) {
+ DRM_DEV_ERROR(dev, "cannot get property m n value.\n");
+ return ret;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "compute M(%lu), N(%lu), divider(%d).\n",
+ m, n, post_divider);
+
+ /* Configure pixel clock */
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, PIXEL_CLOCK_L,
+ (ctx->dt.pixelclock.min / 1000) & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, PIXEL_CLOCK_H,
+ (ctx->dt.pixelclock.min / 1000) >> 8);
+ /* Lane count */
+ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p1_client,
+ MIPI_LANE_CTRL_0, 0xfc);
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client,
+ MIPI_LANE_CTRL_0, 3);
+
+ /* Htotal */
+ htotal = ctx->dt.hactive.min + ctx->dt.hfront_porch.min +
+ ctx->dt.hback_porch.min + ctx->dt.hsync_len.min;
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_TOTAL_PIXELS_L, htotal & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_TOTAL_PIXELS_H, htotal >> 8);
+ /* Hactive */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_ACTIVE_PIXELS_L, ctx->dt.hactive.min & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_ACTIVE_PIXELS_H, ctx->dt.hactive.min >> 8);
+ /* HFP */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_FRONT_PORCH_L, ctx->dt.hfront_porch.min);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_FRONT_PORCH_H,
+ ctx->dt.hfront_porch.min >> 8);
+ /* HWS */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_SYNC_WIDTH_L, ctx->dt.hsync_len.min);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_SYNC_WIDTH_H, ctx->dt.hsync_len.min >> 8);
+ /* HBP */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_BACK_PORCH_L, ctx->dt.hback_porch.min);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ HORIZONTAL_BACK_PORCH_H, ctx->dt.hback_porch.min >> 8);
+ /* Vactive */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, ACTIVE_LINES_L,
+ ctx->dt.vactive.min);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client, ACTIVE_LINES_H,
+ ctx->dt.vactive.min >> 8);
+ /* VFP */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ VERTICAL_FRONT_PORCH, ctx->dt.vfront_porch.min);
+ /* VWS */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ VERTICAL_SYNC_WIDTH, ctx->dt.vsync_len.min);
+ /* VBP */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p2_client,
+ VERTICAL_BACK_PORCH, ctx->dt.vback_porch.min);
+ /* M value */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_PLL_M_NUM_23_16, (m >> 16) & 0xff);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_PLL_M_NUM_15_8, (m >> 8) & 0xff);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_PLL_M_NUM_7_0, (m & 0xff));
+ /* N value */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_PLL_N_NUM_23_16, (n >> 16) & 0xff);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_PLL_N_NUM_15_8, (n >> 8) & 0xff);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_PLL_N_NUM_7_0,
+ (n & 0xff));
+ /* Diff */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_DIGITAL_ADJ_1, 0x3D);
+
+ ret |= anx7625_odfc_config(ctx, post_divider - 1);
+
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "mipi dsi setup IO error.\n");
+
+ return ret;
+}
+
+static int anx7625_swap_dsi_lane3(struct anx7625_data *ctx)
+{
+ int val;
+ struct device *dev = &ctx->client->dev;
+
+ /* Swap MIPI-DSI data lane 3 P and N */
+ val = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, MIPI_SWAP);
+ if (val < 0) {
+ DRM_DEV_ERROR(dev, "IO error : access MIPI_SWAP.\n");
+ return -EIO;
+ }
+
+ val |= (1 << MIPI_SWAP_CH3);
+ return anx7625_reg_write(ctx, ctx->i2c.rx_p1_client, MIPI_SWAP, val);
+}
+
+static int anx7625_api_dsi_config(struct anx7625_data *ctx)
+
+{
+ int val, ret;
+ struct device *dev = &ctx->client->dev;
+
+ /* Swap MIPI-DSI data lane 3 P and N */
+ ret = anx7625_swap_dsi_lane3(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "IO error : swap dsi lane 3 fail.\n");
+ return ret;
+ }
+
+ /* DSI clock settings */
+ val = (0 << MIPI_HS_PWD_CLK) |
+ (0 << MIPI_HS_RT_CLK) |
+ (0 << MIPI_PD_CLK) |
+ (1 << MIPI_CLK_RT_MANUAL_PD_EN) |
+ (1 << MIPI_CLK_HS_MANUAL_PD_EN) |
+ (0 << MIPI_CLK_DET_DET_BYPASS) |
+ (0 << MIPI_CLK_MISS_CTRL) |
+ (0 << MIPI_PD_LPTX_CH_MANUAL_PD_EN);
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_PHY_CONTROL_3, val);
+
+ /*
+ * Decreased HS prepare timing delay from 160 ns to 80 ns to work with
+ * a) Dragon board 810 series (Qualcomm AP)
+ * b) Moving Pixel DSI source (PG3A pattern generator +
+ * P332 D-PHY Probe) default D-PHY timing
+ * 5ns/step
+ */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_TIME_HS_PRPR, 0x10);
+
+ /* Enable DSI mode */
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_18,
+ SELECT_DSI << MIPI_DPI_SELECT);
+
+ ret |= anx7625_dsi_video_timing_config(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "dsi video timing config fail\n");
+ return ret;
+ }
+
+ /* Toggle m, n ready */
+ ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_6,
+ ~(MIPI_M_NUM_READY | MIPI_N_NUM_READY));
+ usleep_range(1000, 1100);
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, MIPI_DIGITAL_PLL_6,
+ MIPI_M_NUM_READY | MIPI_N_NUM_READY);
+
+ /* Configure integer stable register */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_VIDEO_STABLE_CNT, 0x02);
+ /* Power on MIPI RX */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_LANE_CTRL_10, 0x00);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p1_client,
+ MIPI_LANE_CTRL_10, 0x80);
+
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "IO error : mipi dsi enable init fail.\n");
+
+ return ret;
+}
+
+static int anx7625_dsi_config(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ int ret;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "config dsi.\n");
+
+ /* DSC disable */
+ ret = anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+ R_DSC_CTRL_0, ~DSC_EN);
+
+ ret |= anx7625_api_dsi_config(ctx);
+
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "IO error : api dsi config error.\n");
+ return ret;
+ }
+
+ /* Set MIPI RX EN */
+ ret = anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, AP_MIPI_RX_EN);
+ /* Clear mute flag */
+ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, (u8)~AP_MIPI_MUTE);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "IO error : enable mipi rx fail.\n");
+ else
+ DRM_DEV_DEBUG_DRIVER(dev, "success to config DSI\n");
+
+ return ret;
+}
+
+static void anx7625_dp_start(struct anx7625_data *ctx)
+{
+ int ret;
+ struct device *dev = &ctx->client->dev;
+
+ if (!ctx->display_timing_valid) {
+ DRM_DEV_ERROR(dev, "mipi not set display timing yet.\n");
+ return;
+ }
+
+ anx7625_config_audio_input(ctx);
+
+ ret = anx7625_dsi_config(ctx);
+
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "MIPI phy setup error.\n");
+}
+
+static void anx7625_dp_stop(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ int ret;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "stop dp output\n");
+
+ /*
+ * Video disable: 0x72:08 bit 7 = 0;
+ * Audio disable: 0x70:87 bit 0 = 0;
+ */
+ ret = anx7625_write_and(ctx, ctx->i2c.tx_p0_client, 0x87, 0xfe);
+ ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, 0x08, 0x7f);
+
+ ret |= anx7625_video_mute_control(ctx, 1);
+ if (ret < 0)
+ DRM_DEV_ERROR(dev, "IO error : mute video fail\n");
+}
+
+static int sp_tx_rst_aux(struct anx7625_data *ctx)
+{
+ int ret;
+
+ ret = anx7625_write_or(ctx, ctx->i2c.tx_p2_client, RST_CTRL2,
+ AUX_RST);
+ ret |= anx7625_write_and(ctx, ctx->i2c.tx_p2_client, RST_CTRL2,
+ ~AUX_RST);
+ return ret;
+}
+
+static int sp_tx_aux_wr(struct anx7625_data *ctx, u8 offset)
+{
+ int ret;
+
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_BUFF_START, offset);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_COMMAND, 0x04);
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN);
+ return (ret | wait_aux_op_finish(ctx));
+}
+
+static int sp_tx_aux_rd(struct anx7625_data *ctx, u8 len_cmd)
+{
+ int ret;
+
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_COMMAND, len_cmd);
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_CTRL_STATUS, AP_AUX_CTRL_OP_EN);
+ return (ret | wait_aux_op_finish(ctx));
+}
+
+static int sp_tx_get_edid_block(struct anx7625_data *ctx)
+{
+ int c = 0;
+ struct device *dev = &ctx->client->dev;
+
+ sp_tx_aux_wr(ctx, 0x7e);
+ sp_tx_aux_rd(ctx, 0x01);
+ c = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, AP_AUX_BUFF_START);
+ if (c < 0) {
+ DRM_DEV_ERROR(dev, "IO error : access AUX BUFF.\n");
+ return -EIO;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, " EDID Block = %d\n", c + 1);
+
+ if (c > MAX_EDID_BLOCK)
+ c = 1;
+
+ return c;
+}
+
+static int edid_read(struct anx7625_data *ctx,
+ u8 offset, u8 *pblock_buf)
+{
+ int ret, cnt;
+ struct device *dev = &ctx->client->dev;
+
+ for (cnt = 0; cnt <= EDID_TRY_CNT; cnt++) {
+ sp_tx_aux_wr(ctx, offset);
+ /* Set I2C read command 0x01, MOT = 0, and read 16 bytes */
+ ret = sp_tx_aux_rd(ctx, 0xf1);
+
+ if (ret) {
+ sp_tx_rst_aux(ctx);
+ DRM_DEV_DEBUG_DRIVER(dev, "edid read fail, reset!\n");
+ } else {
+ ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_BUFF_START,
+ MAX_DPCD_BUFFER_SIZE,
+ pblock_buf);
+ if (ret > 0)
+ break;
+ }
+ }
+
+ if (cnt > EDID_TRY_CNT)
+ return -EIO;
+
+ return 0;
+}
+
+static int segments_edid_read(struct anx7625_data *ctx,
+ u8 segment, u8 *buf, u8 offset)
+{
+ u8 cnt;
+ int ret;
+ struct device *dev = &ctx->client->dev;
+
+ /* Write address only */
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_ADDR_7_0, 0x30);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_COMMAND, 0x04);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_CTRL_STATUS,
+ AP_AUX_CTRL_ADDRONLY | AP_AUX_CTRL_OP_EN);
+
+ ret |= wait_aux_op_finish(ctx);
+ /* Write segment address */
+ ret |= sp_tx_aux_wr(ctx, segment);
+ /* Data read */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_ADDR_7_0, 0x50);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "IO error : aux initial fail.\n");
+ return ret;
+ }
+
+ for (cnt = 0; cnt <= EDID_TRY_CNT; cnt++) {
+ sp_tx_aux_wr(ctx, offset);
+ /* Set I2C read command 0x01, MOT = 0, and read 16 bytes */
+ ret = sp_tx_aux_rd(ctx, 0xf1);
+
+ if (ret) {
+ ret = sp_tx_rst_aux(ctx);
+ DRM_DEV_ERROR(dev, "segment read fail, reset!\n");
+ } else {
+ ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_BUFF_START,
+ MAX_DPCD_BUFFER_SIZE, buf);
+ if (ret > 0)
+ break;
+ }
+ }
+
+ if (cnt > EDID_TRY_CNT)
+ return -EIO;
+
+ return 0;
+}
+
+static int sp_tx_edid_read(struct anx7625_data *ctx,
+ u8 *pedid_blocks_buf)
+{
+ u8 offset, edid_pos;
+ int count, blocks_num;
+ u8 pblock_buf[MAX_DPCD_BUFFER_SIZE];
+ u8 i, j;
+ u8 g_edid_break = 0;
+ int ret;
+ struct device *dev = &ctx->client->dev;
+
+ /* Initialize the AUX address registers */
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_ADDR_7_0, 0x50);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_ADDR_15_8, 0);
+ ret |= anx7625_write_and(ctx, ctx->i2c.rx_p0_client,
+ AP_AUX_ADDR_19_16, 0xf0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "access aux channel IO error.\n");
+ return -EIO;
+ }
+
+ blocks_num = sp_tx_get_edid_block(ctx);
+ if (blocks_num < 0)
+ return blocks_num;
+
+ count = 0;
+
+ do {
+ switch (count) {
+ case 0:
+ case 1:
+ for (i = 0; i < 8; i++) {
+ offset = (i + count * 8) * MAX_DPCD_BUFFER_SIZE;
+ g_edid_break = edid_read(ctx, offset,
+ pblock_buf);
+
+ if (g_edid_break)
+ break;
+
+ memcpy(&pedid_blocks_buf[offset],
+ pblock_buf,
+ MAX_DPCD_BUFFER_SIZE);
+ }
+
+ break;
+ case 2:
+ offset = 0x00;
+
+ for (j = 0; j < 8; j++) {
+ edid_pos = (j + count * 8) *
+ MAX_DPCD_BUFFER_SIZE;
+
+ if (g_edid_break == 1)
+ break;
+
+ segments_edid_read(ctx, count / 2,
+ pblock_buf, offset);
+ memcpy(&pedid_blocks_buf[edid_pos],
+ pblock_buf,
+ MAX_DPCD_BUFFER_SIZE);
+ offset = offset + 0x10;
+ }
+
+ break;
+ case 3:
+ offset = 0x80;
+
+ for (j = 0; j < 8; j++) {
+ edid_pos = (j + count * 8) *
+ MAX_DPCD_BUFFER_SIZE;
+ if (g_edid_break == 1)
+ break;
+
+ segments_edid_read(ctx, count / 2,
+ pblock_buf, offset);
+ memcpy(&pedid_blocks_buf[edid_pos],
+ pblock_buf,
+ MAX_DPCD_BUFFER_SIZE);
+ offset = offset + 0x10;
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ count++;
+
+ } while (blocks_num >= count);
+
+ /* Check edid data */
+ if (!drm_edid_is_valid((struct edid *)pedid_blocks_buf)) {
+ DRM_DEV_ERROR(dev, "WARNING! edid check fail!\n");
+ return -EINVAL;
+ }
+
+ /* Reset aux channel */
+ sp_tx_rst_aux(ctx);
+
+ return (blocks_num + 1);
+}
+
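+/*
+ * Power-up sequencing: drive the power-enable GPIO first, then the reset
+ * GPIO, with ~10 ms between the steps; the delays are presumably dictated
+ * by the chip's power-up timing requirements.
+ */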
+static void anx7625_power_on(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+
+ if (!ctx->pdata.low_power_mode) {
+ DRM_DEV_DEBUG_DRIVER(dev, "not low power mode!\n");
+ return;
+ }
+
+ /* Power on pin enable */
+ gpiod_set_value(ctx->pdata.gpio_p_on, 1);
+ usleep_range(10000, 11000);
+ /* Power reset pin enable */
+ gpiod_set_value(ctx->pdata.gpio_reset, 1);
+ usleep_range(10000, 11000);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "power on !\n");
+}
+
+static void anx7625_power_standby(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+
+ if (!ctx->pdata.low_power_mode) {
+ DRM_DEV_DEBUG_DRIVER(dev, "not low power mode!\n");
+ return;
+ }
+
+ gpiod_set_value(ctx->pdata.gpio_reset, 0);
+ usleep_range(1000, 1100);
+ gpiod_set_value(ctx->pdata.gpio_p_on, 0);
+ usleep_range(1000, 1100);
+ DRM_DEV_DEBUG_DRIVER(dev, "power down\n");
+}
+
+/* Basic configurations of ANX7625 */
+static void anx7625_config(struct anx7625_data *ctx)
+{
+ anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ XTAL_FRQ_SEL, XTAL_FRQ_27M);
+}
+
+static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ int ret;
+
+ /* Reset main ocm */
+ ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x40);
+ /* Disable PD */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ AP_AV_STATUS, AP_DISABLE_PD);
+ /* Release main ocm */
+ ret |= anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x00);
+
+ if (ret < 0)
+ DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature fail.\n");
+ else
+ DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature succeeded.\n");
+}
+
+static int anx7625_ocm_loading_check(struct anx7625_data *ctx)
+{
+ int ret;
+ struct device *dev = &ctx->client->dev;
+
+ /* Check whether the interface is workable (OCM firmware loaded) */
+ ret = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
+ FLASH_LOAD_STA);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "IO error : access flash load.\n");
+ return ret;
+ }
+ if ((ret & FLASH_LOAD_STA_CHK) != FLASH_LOAD_STA_CHK)
+ return -ENODEV;
+
+ anx7625_disable_pd_protocol(ctx);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Firmware ver %02x%02x,",
+ anx7625_reg_read(ctx,
+ ctx->i2c.rx_p0_client,
+ OCM_FW_VERSION),
+ anx7625_reg_read(ctx,
+ ctx->i2c.rx_p0_client,
+ OCM_FW_REVERSION));
+ DRM_DEV_DEBUG_DRIVER(dev, "Driver version %s\n",
+ ANX7625_DRV_VERSION);
+
+ return 0;
+}
+
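+/*
+ * Run the power-on sequence up to three times, polling the OCM firmware
+ * load-done flag for up to OCM_LOADING_TIME milliseconds per attempt and
+ * powering back down between attempts.
+ */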
+static void anx7625_power_on_init(struct anx7625_data *ctx)
+{
+ int retry_count, i;
+
+ for (retry_count = 0; retry_count < 3; retry_count++) {
+ anx7625_power_on(ctx);
+ anx7625_config(ctx);
+
+ for (i = 0; i < OCM_LOADING_TIME; i++) {
+ if (!anx7625_ocm_loading_check(ctx))
+ return;
+ usleep_range(1000, 1100);
+ }
+ anx7625_power_standby(ctx);
+ }
+}
+
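+/*
+ * Reference-counted power control: power_status counts enable requests,
+ * so the chip is initialized only for the first user and put back in
+ * standby only when the last user drops its reference.
+ */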
+static void anx7625_chip_control(struct anx7625_data *ctx, int state)
+{
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "before set, power_state(%d).\n",
+ atomic_read(&ctx->power_status));
+
+ if (!ctx->pdata.low_power_mode)
+ return;
+
+ if (state) {
+ atomic_inc(&ctx->power_status);
+ if (atomic_read(&ctx->power_status) == 1)
+ anx7625_power_on_init(ctx);
+ } else {
+ if (atomic_read(&ctx->power_status)) {
+ atomic_dec(&ctx->power_status);
+
+ if (atomic_read(&ctx->power_status) == 0)
+ anx7625_power_standby(ctx);
+ }
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "after set, power_state(%d).\n",
+ atomic_read(&ctx->power_status));
+}
+
+static void anx7625_init_gpio(struct anx7625_data *platform)
+{
+ struct device *dev = &platform->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "init gpio\n");
+
+ /* Gpio for chip power enable */
+ platform->pdata.gpio_p_on =
+ devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
+ /* Gpio for chip reset */
+ platform->pdata.gpio_reset =
+ devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+
+ if (platform->pdata.gpio_p_on && platform->pdata.gpio_reset) {
+ platform->pdata.low_power_mode = 1;
+ DRM_DEV_DEBUG_DRIVER(dev, "low power mode, pon %d, reset %d.\n",
+ desc_to_gpio(platform->pdata.gpio_p_on),
+ desc_to_gpio(platform->pdata.gpio_reset));
+ } else {
+ platform->pdata.low_power_mode = 0;
+ DRM_DEV_DEBUG_DRIVER(dev, "not low power mode.\n");
+ }
+}
+
+static void anx7625_stop_dp_work(struct anx7625_data *ctx)
+{
+ ctx->hpd_status = 0;
+ ctx->hpd_high_cnt = 0;
+ ctx->display_timing_valid = 0;
+
+ if (ctx->pdata.low_power_mode == 0)
+ anx7625_disable_pd_protocol(ctx);
+}
+
+static void anx7625_start_dp_work(struct anx7625_data *ctx)
+{
+ int ret;
+ struct device *dev = &ctx->client->dev;
+
+ if (ctx->hpd_high_cnt >= 2) {
+ DRM_DEV_DEBUG_DRIVER(dev, "filter useless HPD\n");
+ return;
+ }
+
+ ctx->hpd_high_cnt++;
+
+ /* HDCP is not supported */
+ ret = anx7625_write_and(ctx, ctx->i2c.rx_p1_client, 0xee, 0x9f);
+
+ /* Try auth flag */
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xec, 0x10);
+ /* Interrupt for DRM */
+ ret |= anx7625_write_or(ctx, ctx->i2c.rx_p1_client, 0xff, 0x01);
+ if (ret < 0)
+ return;
+
+ ret = anx7625_reg_read(ctx, ctx->i2c.rx_p1_client, 0x86);
+ if (ret < 0)
+ return;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Secure OCM version=%02x\n", ret);
+}
+
+static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
+{
+ return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
+}
+
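+/*
+ * Poll the HPD bit in the system status register every 5 ms, for up to
+ * 500 ms, and clear the pending alert/interrupt registers once HPD is
+ * seen before kicking off the DP work.
+ */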
+static void anx7625_hpd_polling(struct anx7625_data *ctx)
+{
+ int ret, val;
+ struct device *dev = &ctx->client->dev;
+
+ if (atomic_read(&ctx->power_status) != 1) {
+ DRM_DEV_DEBUG_DRIVER(dev, "No need to poling HPD status.\n");
+ return;
+ }
+
+ ret = readx_poll_timeout(anx7625_read_hpd_status_p0,
+ ctx, val,
+ ((val & HPD_STATUS) || (val < 0)),
+ 5000,
+ 5000 * 100);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "HPD polling timeout!\n");
+ } else {
+ DRM_DEV_DEBUG_DRIVER(dev, "HPD raise up.\n");
+ anx7625_reg_write(ctx, ctx->i2c.tcpc_client,
+ INTR_ALERT_1, 0xFF);
+ anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ INTERFACE_CHANGE_INT, 0);
+ }
+
+ anx7625_start_dp_work(ctx);
+}
+
+static void anx7625_disconnect_check(struct anx7625_data *ctx)
+{
+ if (atomic_read(&ctx->power_status) == 0)
+ anx7625_stop_dp_work(ctx);
+}
+
+static void anx7625_low_power_mode_check(struct anx7625_data *ctx,
+ int state)
+{
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "low power mode check, state(%d).\n", state);
+
+ if (ctx->pdata.low_power_mode) {
+ anx7625_chip_control(ctx, state);
+ if (state)
+ anx7625_hpd_polling(ctx);
+ else
+ anx7625_disconnect_check(ctx);
+ }
+}
+
+static void anx7625_remove_edid(struct anx7625_data *ctx)
+{
+ ctx->slimport_edid_p.edid_block_num = -1;
+}
+
+static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on)
+{
+ struct device *dev = &ctx->client->dev;
+
+ /* HPD changed */
+ DRM_DEV_DEBUG_DRIVER(dev, "dp_hpd_change_default_func: %d\n",
+ (u32)on);
+
+ if (on == 0) {
+ DRM_DEV_DEBUG_DRIVER(dev, " HPD low\n");
+ anx7625_remove_edid(ctx);
+ anx7625_stop_dp_work(ctx);
+ } else {
+ DRM_DEV_DEBUG_DRIVER(dev, " HPD high\n");
+ anx7625_start_dp_work(ctx);
+ }
+
+ ctx->hpd_status = 1;
+}
+
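+/*
+ * Interrupt bottom half: acknowledge the TCPC alert, read and clear the
+ * interface-change vector, and on an HPD change read the system status
+ * register to decide whether the hot-plug went high or low.
+ */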
+static int anx7625_hpd_change_detect(struct anx7625_data *ctx)
+{
+ int intr_vector, status;
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "power_status=%d\n",
+ (u32)atomic_read(&ctx->power_status));
+
+ status = anx7625_reg_write(ctx, ctx->i2c.tcpc_client,
+ INTR_ALERT_1, 0xFF);
+ if (status < 0) {
+ DRM_DEV_ERROR(dev, "cannot clear alert reg.\n");
+ return status;
+ }
+
+ intr_vector = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
+ INTERFACE_CHANGE_INT);
+ if (intr_vector < 0) {
+ DRM_DEV_ERROR(dev, "cannot access interrupt change reg.\n");
+ return intr_vector;
+ }
+ DRM_DEV_DEBUG_DRIVER(dev, "0x7e:0x44=%x\n", intr_vector);
+ status = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client,
+ INTERFACE_CHANGE_INT,
+ intr_vector & (~intr_vector));
+ if (status < 0) {
+ DRM_DEV_ERROR(dev, "cannot clear interrupt change reg.\n");
+ return status;
+ }
+
+ if (!(intr_vector & HPD_STATUS_CHANGE))
+ return -ENOENT;
+
+ status = anx7625_reg_read(ctx, ctx->i2c.rx_p0_client,
+ SYSTEM_STSTUS);
+ if (status < 0) {
+ DRM_DEV_ERROR(dev, "cannot clear interrupt status.\n");
+ return status;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "0x7e:0x45=%x\n", status);
+ dp_hpd_change_handler(ctx, status & HPD_STATUS);
+
+ return 0;
+}
+
+static void anx7625_work_func(struct work_struct *work)
+{
+ int event;
+ struct anx7625_data *ctx = container_of(work,
+ struct anx7625_data, work);
+
+ mutex_lock(&ctx->lock);
+ event = anx7625_hpd_change_detect(ctx);
+ mutex_unlock(&ctx->lock);
+ if (event < 0)
+ return;
+
+ if (ctx->bridge_attached)
+ drm_helper_hpd_irq_event(ctx->bridge.dev);
+}
+
+static irqreturn_t anx7625_intr_hpd_isr(int irq, void *data)
+{
+ struct anx7625_data *ctx = (struct anx7625_data *)data;
+
+ if (atomic_read(&ctx->power_status) != 1)
+ return IRQ_NONE;
+
+ queue_work(ctx->workqueue, &ctx->work);
+
+ return IRQ_HANDLED;
+}
+
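+/*
+ * Expected device-tree layout: OF graph port 0 connects to the MIPI DSI
+ * host, and port 1 optionally points at a downstream panel, which is
+ * wrapped in a panel bridge.
+ */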
+static int anx7625_parse_dt(struct device *dev,
+ struct anx7625_platform_data *pdata)
+{
+ struct device_node *np = dev->of_node;
+ struct drm_panel *panel;
+ int ret;
+
+ pdata->mipi_host_node = of_graph_get_remote_node(np, 0, 0);
+ if (!pdata->mipi_host_node) {
+ DRM_DEV_ERROR(dev, "fail to get internal panel.\n");
+ return -ENODEV;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "found dsi host node.\n");
+
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL);
+ if (ret < 0) {
+ if (ret == -ENODEV)
+ return 0;
+ return ret;
+ }
+ if (!panel)
+ return -ENODEV;
+
+ pdata->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(pdata->panel_bridge))
+ return PTR_ERR(pdata->panel_bridge);
+ DRM_DEV_DEBUG_DRIVER(dev, "get panel node.\n");
+
+ return 0;
+}
+
+static inline struct anx7625_data *bridge_to_anx7625(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct anx7625_data, bridge);
+}
+
+static struct edid *anx7625_get_edid(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+ struct s_edid_data *p_edid = &ctx->slimport_edid_p;
+ int edid_num;
+ u8 *edid;
+
+ edid = kmalloc(FOUR_BLOCK_SIZE, GFP_KERNEL);
+ if (!edid) {
+ DRM_DEV_ERROR(dev, "Fail to allocate buffer\n");
+ return NULL;
+ }
+
+ if (ctx->slimport_edid_p.edid_block_num > 0) {
+ memcpy(edid, ctx->slimport_edid_p.edid_raw_data,
+ FOUR_BLOCK_SIZE);
+ return (struct edid *)edid;
+ }
+
+ anx7625_low_power_mode_check(ctx, 1);
+ edid_num = sp_tx_edid_read(ctx, p_edid->edid_raw_data);
+ anx7625_low_power_mode_check(ctx, 0);
+
+ if (edid_num < 1) {
+ DRM_DEV_ERROR(dev, "Fail to read EDID: %d\n", edid_num);
+ kfree(edid);
+ return NULL;
+ }
+
+ p_edid->edid_block_num = edid_num;
+
+ memcpy(edid, ctx->slimport_edid_p.edid_raw_data, FOUR_BLOCK_SIZE);
+ return (struct edid *)edid;
+}
+
+static enum drm_connector_status anx7625_sink_detect(struct anx7625_data *ctx)
+{
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "sink detect, return connected\n");
+
+ return connector_status_connected;
+}
+
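+/*
+ * Register and attach a DSI device on the host found via the device tree;
+ * the link is hard-coded to 4 lanes, RGB888, video mode with sync pulses.
+ */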
+static int anx7625_attach_dsi(struct anx7625_data *ctx)
+{
+ struct mipi_dsi_device *dsi;
+ struct device *dev = &ctx->client->dev;
+ struct mipi_dsi_host *host;
+ const struct mipi_dsi_device_info info = {
+ .type = "anx7625",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ DRM_DEV_DEBUG_DRIVER(dev, "attach dsi\n");
+
+ host = of_find_mipi_dsi_host_by_node(ctx->pdata.mipi_host_node);
+ if (!host) {
+ DRM_DEV_ERROR(dev, "fail to find dsi host.\n");
+ return -EINVAL;
+ }
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ DRM_DEV_ERROR(dev, "fail to create dsi device.\n");
+ return -EINVAL;
+ }
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_EOT_PACKET |
+ MIPI_DSI_MODE_VIDEO_HSE;
+
+ if (mipi_dsi_attach(dsi) < 0) {
+ DRM_DEV_ERROR(dev, "fail to attach dsi to host.\n");
+ mipi_dsi_device_unregister(dsi);
+ return -EINVAL;
+ }
+
+ ctx->dsi = dsi;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "attach dsi succeeded.\n");
+
+ return 0;
+}
+
+static void anx7625_bridge_detach(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+
+ if (ctx->dsi) {
+ mipi_dsi_detach(ctx->dsi);
+ mipi_dsi_device_unregister(ctx->dsi);
+ }
+}
+
+static int anx7625_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ int err;
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm attach\n");
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ if (!bridge->encoder) {
+ DRM_DEV_ERROR(dev, "Parent encoder object not found");
+ return -ENODEV;
+ }
+
+ err = anx7625_attach_dsi(ctx);
+ if (err) {
+ DRM_DEV_ERROR(dev, "Fail to attach to dsi : %d\n", err);
+ return err;
+ }
+
+ if (ctx->pdata.panel_bridge) {
+ err = drm_bridge_attach(bridge->encoder,
+ ctx->pdata.panel_bridge,
+ &ctx->bridge, flags);
+ if (err) {
+ DRM_DEV_ERROR(dev,
+ "Fail to attach panel bridge: %d\n", err);
+ return err;
+ }
+ }
+
+ ctx->bridge_attached = 1;
+
+ return 0;
+}
+
+static enum drm_mode_status
+anx7625_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm mode checking\n");
+
+ /* Max 1200p at 5.4 GHz, one lane, pixel clock 300 MHz */
+ if (mode->clock > SUPPORT_PIXEL_CLOCK) {
+ DRM_DEV_DEBUG_DRIVER(dev,
+ "drm mode invalid, pixelclock too high.\n");
+ return MODE_CLOCK_HIGH;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm mode valid.\n");
+
+ return MODE_OK;
+}
+
+static void anx7625_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *old_mode,
+ const struct drm_display_mode *mode)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm mode set\n");
+
+ ctx->dt.pixelclock.min = mode->clock;
+ ctx->dt.hactive.min = mode->hdisplay;
+ ctx->dt.hsync_len.min = mode->hsync_end - mode->hsync_start;
+ ctx->dt.hfront_porch.min = mode->hsync_start - mode->hdisplay;
+ ctx->dt.hback_porch.min = mode->htotal - mode->hsync_end;
+ ctx->dt.vactive.min = mode->vdisplay;
+ ctx->dt.vsync_len.min = mode->vsync_end - mode->vsync_start;
+ ctx->dt.vfront_porch.min = mode->vsync_start - mode->vdisplay;
+ ctx->dt.vback_porch.min = mode->vtotal - mode->vsync_end;
+
+ ctx->display_timing_valid = 1;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "pixelclock(%d).\n", ctx->dt.pixelclock.min);
+ DRM_DEV_DEBUG_DRIVER(dev, "hactive(%d), hsync(%d), hfp(%d), hbp(%d)\n",
+ ctx->dt.hactive.min,
+ ctx->dt.hsync_len.min,
+ ctx->dt.hfront_porch.min,
+ ctx->dt.hback_porch.min);
+ DRM_DEV_DEBUG_DRIVER(dev, "vactive(%d), vsync(%d), vfp(%d), vbp(%d)\n",
+ ctx->dt.vactive.min,
+ ctx->dt.vsync_len.min,
+ ctx->dt.vfront_porch.min,
+ ctx->dt.vback_porch.min);
+ DRM_DEV_DEBUG_DRIVER(dev, "hdisplay(%d),hsync_start(%d).\n",
+ mode->hdisplay,
+ mode->hsync_start);
+ DRM_DEV_DEBUG_DRIVER(dev, "hsync_end(%d),htotal(%d).\n",
+ mode->hsync_end,
+ mode->htotal);
+ DRM_DEV_DEBUG_DRIVER(dev, "vdisplay(%d),vsync_start(%d).\n",
+ mode->vdisplay,
+ mode->vsync_start);
+ DRM_DEV_DEBUG_DRIVER(dev, "vsync_end(%d),vtotal(%d).\n",
+ mode->vsync_end,
+ mode->vtotal);
+}
+
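+/*
+ * The DSI-to-DP conversion appears to require even horizontal timing
+ * values and a minimum horizontal blanking width, so this fixup nudges
+ * HFP, HBP and HSYNC while keeping the total horizontal blanking, and
+ * hence the pixel clock, consistent.
+ */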
+static bool anx7625_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = &ctx->client->dev;
+ u32 hsync, hfp, hbp, hblanking;
+ u32 adj_hsync, adj_hfp, adj_hbp, adj_hblanking, delta_adj;
+ u32 vref, adj_clock;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm mode fixup set\n");
+
+ hsync = mode->hsync_end - mode->hsync_start;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ hblanking = mode->htotal - mode->hdisplay;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "before mode fixup\n");
+ DRM_DEV_DEBUG_DRIVER(dev, "hsync(%d), hfp(%d), hbp(%d), clock(%d)\n",
+ hsync, hfp, hbp, adj->clock);
+ DRM_DEV_DEBUG_DRIVER(dev, "hsync_start(%d), hsync_end(%d), htot(%d)\n",
+ adj->hsync_start, adj->hsync_end, adj->htotal);
+
+ adj_hfp = hfp;
+ adj_hsync = hsync;
+ adj_hbp = hbp;
+ adj_hblanking = hblanking;
+
+ /* HFP needs to be even */
+ if (hfp & 0x1) {
+ adj_hfp += 1;
+ adj_hblanking += 1;
+ }
+
+ /* HBP needs to be even */
+ if (hbp & 0x1) {
+ adj_hbp -= 1;
+ adj_hblanking -= 1;
+ }
+
+ /* HSYNC needs to be even */
+ if (hsync & 0x1) {
+ if (adj_hblanking < hblanking)
+ adj_hsync += 1;
+ else
+ adj_hsync -= 1;
+ }
+
+ /*
+ * Once illegal timing is detected, fall back to the default HFP,
+ * HSYNC and HBP. This adjustment is made for the built-in eDP
+ * panel; for an external DP monitor, returning false may be
+ * needed instead.
+ */
+ if (hblanking < HBLANKING_MIN || (hfp < HP_MIN && hbp < HP_MIN)) {
+ adj_hsync = SYNC_LEN_DEF;
+ adj_hfp = HFP_HBP_DEF;
+ adj_hbp = HFP_HBP_DEF;
+ vref = adj->clock * 1000 / (adj->htotal * adj->vtotal);
+ if (hblanking < HBLANKING_MIN) {
+ delta_adj = HBLANKING_MIN - hblanking;
+ adj_clock = vref * delta_adj * adj->vtotal;
+ adj->clock += DIV_ROUND_UP(adj_clock, 1000);
+ } else {
+ delta_adj = hblanking - HBLANKING_MIN;
+ adj_clock = vref * delta_adj * adj->vtotal;
+ adj->clock -= DIV_ROUND_UP(adj_clock, 1000);
+ }
+
+ DRM_WARN("illegal hblanking timing, use default.\n");
+ DRM_WARN("hfp(%d), hbp(%d), hsync(%d).\n", hfp, hbp, hsync);
+ } else if (adj_hfp < HP_MIN) {
+ /* Adjust hfp if hfp less than HP_MIN */
+ delta_adj = HP_MIN - adj_hfp;
+ adj_hfp = HP_MIN;
+
+ /*
+ * Balance the total HBlanking pixels: if HBP does not have enough
+ * space, adjust the HSYNC length, otherwise adjust HBP.
+ */
+ if ((adj_hbp - delta_adj) < HP_MIN)
+ /* HBP not enough space */
+ adj_hsync -= delta_adj;
+ else
+ adj_hbp -= delta_adj;
+ } else if (adj_hbp < HP_MIN) {
+ delta_adj = HP_MIN - adj_hbp;
+ adj_hbp = HP_MIN;
+
+ /*
+ * Balance the total HBlanking pixels: if HFP does not have enough
+ * space, adjust the HSYNC length, otherwise adjust HFP.
+ */
+ if ((adj_hfp - delta_adj) < HP_MIN)
+ /* HFP not enough space */
+ adj_hsync -= delta_adj;
+ else
+ adj_hfp -= delta_adj;
+ }
+
+ DRM_DEV_DEBUG_DRIVER(dev, "after mode fixup\n");
+ DRM_DEV_DEBUG_DRIVER(dev, "hsync(%d), hfp(%d), hbp(%d), clock(%d)\n",
+ adj_hsync, adj_hfp, adj_hbp, adj->clock);
+
+ /* Reconstruct timing */
+ adj->hsync_start = adj->hdisplay + adj_hfp;
+ adj->hsync_end = adj->hsync_start + adj_hsync;
+ adj->htotal = adj->hsync_end + adj_hbp;
+ DRM_DEV_DEBUG_DRIVER(dev, "hsync_start(%d), hsync_end(%d), htot(%d)\n",
+ adj->hsync_start, adj->hsync_end, adj->htotal);
+
+ return true;
+}
+
+static void anx7625_bridge_enable(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm enable\n");
+
+ anx7625_low_power_mode_check(ctx, 1);
+
+ if (WARN_ON(!atomic_read(&ctx->power_status)))
+ return;
+
+ anx7625_dp_start(ctx);
+}
+
+static void anx7625_bridge_disable(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = &ctx->client->dev;
+
+ if (WARN_ON(!atomic_read(&ctx->power_status)))
+ return;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm disable\n");
+
+ anx7625_dp_stop(ctx);
+
+ anx7625_low_power_mode_check(ctx, 0);
+}
+
+static enum drm_connector_status
+anx7625_bridge_detect(struct drm_bridge *bridge)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm bridge detect\n");
+
+ return anx7625_sink_detect(ctx);
+}
+
+static struct edid *anx7625_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct anx7625_data *ctx = bridge_to_anx7625(bridge);
+ struct device *dev = &ctx->client->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "drm bridge get edid\n");
+
+ return anx7625_get_edid(ctx);
+}
+
+static const struct drm_bridge_funcs anx7625_bridge_funcs = {
+ .attach = anx7625_bridge_attach,
+ .detach = anx7625_bridge_detach,
+ .disable = anx7625_bridge_disable,
+ .mode_valid = anx7625_bridge_mode_valid,
+ .mode_set = anx7625_bridge_mode_set,
+ .mode_fixup = anx7625_bridge_mode_fixup,
+ .enable = anx7625_bridge_enable,
+ .detect = anx7625_bridge_detect,
+ .get_edid = anx7625_bridge_get_edid,
+};
+
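+/*
+ * The chip exposes its register pages on several fixed I2C addresses; the
+ * datasheet-style 8-bit addresses from anx7625.h are shifted right by one
+ * to get the 7-bit addresses the I2C core expects. Note that
+ * i2c_new_dummy_device() returns an ERR_PTR() rather than NULL on failure,
+ * so the NULL checks below are arguably too lax.
+ */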
+static int anx7625_register_i2c_dummy_clients(struct anx7625_data *ctx,
+ struct i2c_client *client)
+{
+ ctx->i2c.tx_p0_client = i2c_new_dummy_device(client->adapter,
+ TX_P0_ADDR >> 1);
+ if (!ctx->i2c.tx_p0_client)
+ return -ENOMEM;
+
+ ctx->i2c.tx_p1_client = i2c_new_dummy_device(client->adapter,
+ TX_P1_ADDR >> 1);
+ if (!ctx->i2c.tx_p1_client)
+ goto free_tx_p0;
+
+ ctx->i2c.tx_p2_client = i2c_new_dummy_device(client->adapter,
+ TX_P2_ADDR >> 1);
+ if (!ctx->i2c.tx_p2_client)
+ goto free_tx_p1;
+
+ ctx->i2c.rx_p0_client = i2c_new_dummy_device(client->adapter,
+ RX_P0_ADDR >> 1);
+ if (!ctx->i2c.rx_p0_client)
+ goto free_tx_p2;
+
+ ctx->i2c.rx_p1_client = i2c_new_dummy_device(client->adapter,
+ RX_P1_ADDR >> 1);
+ if (!ctx->i2c.rx_p1_client)
+ goto free_rx_p0;
+
+ ctx->i2c.rx_p2_client = i2c_new_dummy_device(client->adapter,
+ RX_P2_ADDR >> 1);
+ if (!ctx->i2c.rx_p2_client)
+ goto free_rx_p1;
+
+ ctx->i2c.tcpc_client = i2c_new_dummy_device(client->adapter,
+ TCPC_INTERFACE_ADDR >> 1);
+ if (!ctx->i2c.tcpc_client)
+ goto free_rx_p2;
+
+ return 0;
+
+free_rx_p2:
+ i2c_unregister_device(ctx->i2c.rx_p2_client);
+free_rx_p1:
+ i2c_unregister_device(ctx->i2c.rx_p1_client);
+free_rx_p0:
+ i2c_unregister_device(ctx->i2c.rx_p0_client);
+free_tx_p2:
+ i2c_unregister_device(ctx->i2c.tx_p2_client);
+free_tx_p1:
+ i2c_unregister_device(ctx->i2c.tx_p1_client);
+free_tx_p0:
+ i2c_unregister_device(ctx->i2c.tx_p0_client);
+
+ return -ENOMEM;
+}
+
+static void anx7625_unregister_i2c_dummy_clients(struct anx7625_data *ctx)
+{
+ i2c_unregister_device(ctx->i2c.tx_p0_client);
+ i2c_unregister_device(ctx->i2c.tx_p1_client);
+ i2c_unregister_device(ctx->i2c.tx_p2_client);
+ i2c_unregister_device(ctx->i2c.rx_p0_client);
+ i2c_unregister_device(ctx->i2c.rx_p1_client);
+ i2c_unregister_device(ctx->i2c.rx_p2_client);
+ i2c_unregister_device(ctx->i2c.tcpc_client);
+}
+
+static int anx7625_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct anx7625_data *platform;
+ struct anx7625_platform_data *pdata;
+ int ret = 0;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ DRM_DEV_ERROR(dev, "anx7625's i2c bus doesn't support\n");
+ return -ENODEV;
+ }
+
+ platform = kzalloc(sizeof(*platform), GFP_KERNEL);
+ if (!platform) {
+ DRM_DEV_ERROR(dev, "fail to allocate driver data\n");
+ return -ENOMEM;
+ }
+
+ pdata = &platform->pdata;
+
+ ret = anx7625_parse_dt(dev, pdata);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "fail to parse DT : %d\n", ret);
+ goto free_platform;
+ }
+
+ platform->client = client;
+ i2c_set_clientdata(client, platform);
+
+ anx7625_init_gpio(platform);
+
+ atomic_set(&platform->power_status, 0);
+
+ mutex_init(&platform->lock);
+
+ platform->pdata.intp_irq = client->irq;
+ if (platform->pdata.intp_irq) {
+ INIT_WORK(&platform->work, anx7625_work_func);
+ platform->workqueue = create_workqueue("anx7625_work");
+ if (!platform->workqueue) {
+ DRM_DEV_ERROR(dev, "fail to create work queue\n");
+ ret = -ENOMEM;
+ goto free_platform;
+ }
+
+ ret = devm_request_threaded_irq(dev, platform->pdata.intp_irq,
+ NULL, anx7625_intr_hpd_isr,
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "anx7625-intp", platform);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "fail to request irq\n");
+ goto free_wq;
+ }
+ }
+
+ if (anx7625_register_i2c_dummy_clients(platform, client) != 0) {
+ ret = -ENOMEM;
+ DRM_DEV_ERROR(dev, "fail to reserve I2C bus.\n");
+ goto free_wq;
+ }
+
+ if (platform->pdata.low_power_mode == 0) {
+ anx7625_disable_pd_protocol(platform);
+ atomic_set(&platform->power_status, 1);
+ }
+
+ /* Add work function */
+ if (platform->pdata.intp_irq)
+ queue_work(platform->workqueue, &platform->work);
+
+ platform->bridge.funcs = &anx7625_bridge_funcs;
+ platform->bridge.of_node = client->dev.of_node;
+ platform->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
+ platform->bridge.type = DRM_MODE_CONNECTOR_eDP;
+ drm_bridge_add(&platform->bridge);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "probe done\n");
+
+ return 0;
+
+free_wq:
+ if (platform->workqueue)
+ destroy_workqueue(platform->workqueue);
+
+free_platform:
+ kfree(platform);
+
+ return ret;
+}
+
+static int anx7625_i2c_remove(struct i2c_client *client)
+{
+ struct anx7625_data *platform = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&platform->bridge);
+
+ if (platform->pdata.intp_irq)
+ destroy_workqueue(platform->workqueue);
+
+ anx7625_unregister_i2c_dummy_clients(platform);
+
+ kfree(platform);
+ return 0;
+}
+
+static const struct i2c_device_id anx7625_id[] = {
+ {"anx7625", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, anx7625_id);
+
+static const struct of_device_id anx_match_table[] = {
+ {.compatible = "analogix,anx7625",},
+ {},
+};
+
+static struct i2c_driver anx7625_driver = {
+ .driver = {
+ .name = "anx7625",
+ .of_match_table = anx_match_table,
+ },
+ .probe = anx7625_i2c_probe,
+ .remove = anx7625_i2c_remove,
+
+ .id_table = anx7625_id,
+};
+
+module_i2c_driver(anx7625_driver);
+
+MODULE_DESCRIPTION("MIPI2DP anx7625 driver");
+MODULE_AUTHOR("Xin Ji <xji@analogixsemi.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(ANX7625_DRV_VERSION);
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
new file mode 100644
index 000000000000..193ad86c5450
--- /dev/null
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
@@ -0,0 +1,390 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright(c) 2020, Analogix Semiconductor. All rights reserved.
+ *
+ */
+
+#ifndef __ANX7625_H__
+#define __ANX7625_H__
+
+#define ANX7625_DRV_VERSION "0.1.04"
+
+/* OCM firmware loading: number of ~1 ms polling attempts */
+#define OCM_LOADING_TIME 10
+
+/********* ANX7625 Register **********/
+#define TX_P0_ADDR 0x70
+#define TX_P1_ADDR 0x7A
+#define TX_P2_ADDR 0x72
+
+#define RX_P0_ADDR 0x7e
+#define RX_P1_ADDR 0x84
+#define RX_P2_ADDR 0x54
+
+#define RSVD_00_ADDR 0x00
+#define RSVD_D1_ADDR 0xD1
+#define RSVD_60_ADDR 0x60
+#define RSVD_39_ADDR 0x39
+#define RSVD_7F_ADDR 0x7F
+
+#define TCPC_INTERFACE_ADDR 0x58
+
+/* Clock frequency in Hz */
+#define XTAL_FRQ (27 * 1000000)
+
+#define POST_DIVIDER_MIN 1
+#define POST_DIVIDER_MAX 16
+#define PLL_OUT_FREQ_MIN 520000000UL
+#define PLL_OUT_FREQ_MAX 730000000UL
+#define PLL_OUT_FREQ_ABS_MIN 300000000UL
+#define PLL_OUT_FREQ_ABS_MAX 800000000UL
+#define MAX_UNSIGNED_24BIT 16777215UL
+
+/***************************************************************/
+/* Register definition of device address 0x58 */
+
+#define PRODUCT_ID_L 0x02
+#define PRODUCT_ID_H 0x03
+
+#define INTR_ALERT_1 0xCC
+#define INTR_SOFTWARE_INT BIT(3)
+#define INTR_RECEIVED_MSG BIT(5)
+
+#define SYSTEM_STSTUS 0x45
+#define INTERFACE_CHANGE_INT 0x44
+#define HPD_STATUS_CHANGE 0x80
+#define HPD_STATUS 0x80
+
+/******** END of I2C Address 0x58 ********/
+
+/***************************************************************/
+/* Register definition of device address 0x70 */
+#define I2C_ADDR_70_DPTX 0x70
+
+#define SP_TX_LINK_BW_SET_REG 0xA0
+#define SP_TX_LANE_COUNT_SET_REG 0xA1
+
+#define M_VID_0 0xC0
+#define M_VID_1 0xC1
+#define M_VID_2 0xC2
+#define N_VID_0 0xC3
+#define N_VID_1 0xC4
+#define N_VID_2 0xC5
+
+/***************************************************************/
+/* Register definition of device address 0x72 */
+#define AUX_RST 0x04
+#define RST_CTRL2 0x07
+
+#define SP_TX_TOTAL_LINE_STA_L 0x24
+#define SP_TX_TOTAL_LINE_STA_H 0x25
+#define SP_TX_ACT_LINE_STA_L 0x26
+#define SP_TX_ACT_LINE_STA_H 0x27
+#define SP_TX_V_F_PORCH_STA 0x28
+#define SP_TX_V_SYNC_STA 0x29
+#define SP_TX_V_B_PORCH_STA 0x2A
+#define SP_TX_TOTAL_PIXEL_STA_L 0x2B
+#define SP_TX_TOTAL_PIXEL_STA_H 0x2C
+#define SP_TX_ACT_PIXEL_STA_L 0x2D
+#define SP_TX_ACT_PIXEL_STA_H 0x2E
+#define SP_TX_H_F_PORCH_STA_L 0x2F
+#define SP_TX_H_F_PORCH_STA_H 0x30
+#define SP_TX_H_SYNC_STA_L 0x31
+#define SP_TX_H_SYNC_STA_H 0x32
+#define SP_TX_H_B_PORCH_STA_L 0x33
+#define SP_TX_H_B_PORCH_STA_H 0x34
+
+#define SP_TX_VID_CTRL 0x84
+#define SP_TX_BPC_MASK 0xE0
+#define SP_TX_BPC_6 0x00
+#define SP_TX_BPC_8 0x20
+#define SP_TX_BPC_10 0x40
+#define SP_TX_BPC_12 0x60
+
+#define VIDEO_BIT_MATRIX_12 0x4c
+
+#define AUDIO_CHANNEL_STATUS_1 0xd0
+#define AUDIO_CHANNEL_STATUS_2 0xd1
+#define AUDIO_CHANNEL_STATUS_3 0xd2
+#define AUDIO_CHANNEL_STATUS_4 0xd3
+#define AUDIO_CHANNEL_STATUS_5 0xd4
+#define AUDIO_CHANNEL_STATUS_6 0xd5
+#define TDM_SLAVE_MODE 0x10
+#define I2S_SLAVE_MODE 0x08
+
+#define AUDIO_CONTROL_REGISTER 0xe6
+#define TDM_TIMING_MODE 0x08
+
+#define I2C_ADDR_72_DPTX 0x72
+
+#define HP_MIN 8
+#define HBLANKING_MIN 80
+#define SYNC_LEN_DEF 32
+#define HFP_HBP_DEF ((HBLANKING_MIN - SYNC_LEN_DEF) / 2)
+#define VIDEO_CONTROL_0 0x08
+
+#define ACTIVE_LINES_L 0x14
+#define ACTIVE_LINES_H 0x15 /* Bit[7:6] are reserved */
+#define VERTICAL_FRONT_PORCH 0x16
+#define VERTICAL_SYNC_WIDTH 0x17
+#define VERTICAL_BACK_PORCH 0x18
+
+#define HORIZONTAL_TOTAL_PIXELS_L 0x19
+#define HORIZONTAL_TOTAL_PIXELS_H 0x1A /* Bit[7:6] are reserved */
+#define HORIZONTAL_ACTIVE_PIXELS_L 0x1B
+#define HORIZONTAL_ACTIVE_PIXELS_H 0x1C /* Bit[7:6] are reserved */
+#define HORIZONTAL_FRONT_PORCH_L 0x1D
+#define HORIZONTAL_FRONT_PORCH_H 0x1E /* Bit[7:4] are reserved */
+#define HORIZONTAL_SYNC_WIDTH_L 0x1F
+#define HORIZONTAL_SYNC_WIDTH_H 0x20 /* Bit[7:4] are reserved */
+#define HORIZONTAL_BACK_PORCH_L 0x21
+#define HORIZONTAL_BACK_PORCH_H 0x22 /* Bit[7:4] are reserved */
+
+/******** END of I2C Address 0x72 *********/
+/***************************************************************/
+/* Register definition of device address 0x7e */
+
+#define I2C_ADDR_7E_FLASH_CONTROLLER 0x7E
+
+#define FLASH_LOAD_STA 0x05
+#define FLASH_LOAD_STA_CHK BIT(7)
+
+#define XTAL_FRQ_SEL 0x3F
+/* bit field positions */
+#define XTAL_FRQ_SEL_POS 5
+/* bit field values */
+#define XTAL_FRQ_19M2 (0 << XTAL_FRQ_SEL_POS)
+#define XTAL_FRQ_27M (4 << XTAL_FRQ_SEL_POS)
+
+#define R_DSC_CTRL_0 0x40
+#define READ_STATUS_EN 7
+#define CLK_1MEG_RB 6 /* 1MHz clock reset; 0=reset, 1=reset release */
+#define DSC_BIST_DONE 1 /* Bit[5:1]: 1=DSC MBIST pass */
+#define DSC_EN 0x01 /* 1=DSC enabled, 0=DSC disabled */
+
+#define OCM_FW_VERSION 0x31
+#define OCM_FW_REVERSION 0x32
+
+#define AP_AUX_ADDR_7_0 0x11
+#define AP_AUX_ADDR_15_8 0x12
+#define AP_AUX_ADDR_19_16 0x13
+
+/* Bit[0:3] AUX status, bit 4 op_en, bit 5 address only */
+#define AP_AUX_CTRL_STATUS 0x14
+#define AP_AUX_CTRL_OP_EN 0x10
+#define AP_AUX_CTRL_ADDRONLY 0x20
+
+#define AP_AUX_BUFF_START 0x15
+#define PIXEL_CLOCK_L 0x25
+#define PIXEL_CLOCK_H 0x26
+
+#define AP_AUX_COMMAND 0x27 /* com+len */
+/* Bit 0&1: 3D video structure */
+/* 0x01: frame packing, 0x02:Line alternative, 0x03:Side-by-side(full) */
+#define AP_AV_STATUS 0x28
+#define AP_VIDEO_CHG BIT(2)
+#define AP_AUDIO_CHG BIT(3)
+#define AP_MIPI_MUTE BIT(4) /* 1: MIPI input mute, 0: unmute */
+#define AP_MIPI_RX_EN BIT(5) /* 1: MIPI RX input enabled, 0: no RX input */
+#define AP_DISABLE_PD BIT(6)
+#define AP_DISABLE_DISPLAY BIT(7)
+/***************************************************************/
+/* Register definition of device address 0x84 */
+#define MIPI_PHY_CONTROL_3 0x03
+#define MIPI_HS_PWD_CLK 7
+#define MIPI_HS_RT_CLK 6
+#define MIPI_PD_CLK 5
+#define MIPI_CLK_RT_MANUAL_PD_EN 4
+#define MIPI_CLK_HS_MANUAL_PD_EN 3
+#define MIPI_CLK_DET_DET_BYPASS 2
+#define MIPI_CLK_MISS_CTRL 1
+#define MIPI_PD_LPTX_CH_MANUAL_PD_EN 0
+
+#define MIPI_LANE_CTRL_0 0x05
+#define MIPI_TIME_HS_PRPR 0x08
+
+/*
+ * After the MIPI RX protocol layer has received video frames, it starts
+ * to reconstruct the video stream from the PHY.
+ */
+#define MIPI_VIDEO_STABLE_CNT 0x0A
+
+#define MIPI_LANE_CTRL_10 0x0F
+#define MIPI_DIGITAL_ADJ_1 0x1B
+
+#define MIPI_PLL_M_NUM_23_16 0x1E
+#define MIPI_PLL_M_NUM_15_8 0x1F
+#define MIPI_PLL_M_NUM_7_0 0x20
+#define MIPI_PLL_N_NUM_23_16 0x21
+#define MIPI_PLL_N_NUM_15_8 0x22
+#define MIPI_PLL_N_NUM_7_0 0x23
+
+#define MIPI_DIGITAL_PLL_6 0x2A
+/* Bit[7:6]: VCO band control, only effective */
+#define MIPI_M_NUM_READY 0x10
+#define MIPI_N_NUM_READY 0x08
+#define STABLE_INTEGER_CNT_EN 0x04
+#define MIPI_PLL_TEST_BIT 0
+/* Bit[1:0]: test point output select - */
+/* 00: VCO power, 01: dvdd_pdt, 10: dvdd, 11: vcox */
+
+#define MIPI_DIGITAL_PLL_7 0x2B
+#define MIPI_PLL_FORCE_N_EN 7
+#define MIPI_PLL_FORCE_BAND_EN 6
+
+#define MIPI_PLL_VCO_TUNE_REG 4
+/* Bit[5:4]: VCO metal capacitance - */
+/* 00: +20% fast, 01: +10% fast (default), 10: typical, 11: -10% slow */
+#define MIPI_PLL_VCO_TUNE_REG_VAL 0x30
+
+#define MIPI_PLL_PLL_LDO_BIT 2
+/* Bit[3:2]: vco_v2i power - */
+/* 00: 1.40V, 01: 1.45V (default), 10: 1.50V, 11: 1.55V */
+#define MIPI_PLL_RESET_N 0x02
+#define MIPI_FRQ_FORCE_NDET 0
+
+#define MIPI_ALERT_CLR_0 0x2D
+#define HS_link_error_clear 7
+/* This bit itself is S/C, and it clears 0x84:0x31[7] */
+
+#define MIPI_ALERT_OUT_0 0x31
+#define check_sum_err_hs_sync 7
+/* This bit is cleared by 0x84:0x2D[7] */
+
+#define MIPI_DIGITAL_PLL_8 0x33
+#define MIPI_POST_DIV_VAL 4
+/* N means divided by (n+1), n = 0~15 */
+#define MIPI_EN_LOCK_FRZ 3
+#define MIPI_FRQ_COUNTER_RST 2
+#define MIPI_FRQ_SET_REG_8 1
+/* Bit 0 is reserved */
+
+#define MIPI_DIGITAL_PLL_9 0x34
+
+#define MIPI_DIGITAL_PLL_16 0x3B
+#define MIPI_FRQ_FREEZE_NDET 7
+#define MIPI_FRQ_REG_SET_ENABLE 6
+#define MIPI_REG_FORCE_SEL_EN 5
+#define MIPI_REG_SEL_DIV_REG 4
+#define MIPI_REG_FORCE_PRE_DIV_EN 3
+/* Bit 2 is reserved */
+#define MIPI_FREF_D_IND 1
+#define REF_CLK_27000KHZ 1
+#define REF_CLK_19200KHZ 0
+#define MIPI_REG_PLL_PLL_TEST_ENABLE 0
+
+#define MIPI_DIGITAL_PLL_18 0x3D
+#define FRQ_COUNT_RB_SEL 7
+#define REG_FORCE_POST_DIV_EN 6
+#define MIPI_DPI_SELECT 5
+#define SELECT_DSI 1
+#define SELECT_DPI 0
+#define REG_BAUD_DIV_RATIO 0
+
+#define H_BLANK_L 0x3E
+/* For DSC only */
+#define H_BLANK_H 0x3F
+/* For DSC only; note: bit[7:6] are reserved */
+#define MIPI_SWAP 0x4A
+#define MIPI_SWAP_CH0 7
+#define MIPI_SWAP_CH1 6
+#define MIPI_SWAP_CH2 5
+#define MIPI_SWAP_CH3 4
+#define MIPI_SWAP_CLK 3
+/* Bit[2:0] are reserved */
+
+/******** END of I2C Address 0x84 *********/
+
+/* DPCD regs */
+#define DPCD_DPCD_REV 0x00
+#define DPCD_MAX_LINK_RATE 0x01
+#define DPCD_MAX_LANE_COUNT 0x02
+
+/********* ANX7625 Register End **********/
+
+/***************** Display *****************/
+enum audio_fs {
+ AUDIO_FS_441K = 0x00,
+ AUDIO_FS_48K = 0x02,
+ AUDIO_FS_32K = 0x03,
+ AUDIO_FS_882K = 0x08,
+ AUDIO_FS_96K = 0x0a,
+ AUDIO_FS_1764K = 0x0c,
+ AUDIO_FS_192K = 0x0e
+};
+
+enum audio_wd_len {
+ AUDIO_W_LEN_16_20MAX = 0x02,
+ AUDIO_W_LEN_18_20MAX = 0x04,
+ AUDIO_W_LEN_17_20MAX = 0x0c,
+ AUDIO_W_LEN_19_20MAX = 0x08,
+ AUDIO_W_LEN_20_20MAX = 0x0a,
+ AUDIO_W_LEN_20_24MAX = 0x03,
+ AUDIO_W_LEN_22_24MAX = 0x05,
+ AUDIO_W_LEN_21_24MAX = 0x0d,
+ AUDIO_W_LEN_23_24MAX = 0x09,
+ AUDIO_W_LEN_24_24MAX = 0x0b
+};
+
+#define I2S_CH_2 0x01
+#define TDM_CH_4 0x03
+#define TDM_CH_6 0x05
+#define TDM_CH_8 0x07
+
+#define MAX_DPCD_BUFFER_SIZE 16
+
+#define ONE_BLOCK_SIZE 128
+#define FOUR_BLOCK_SIZE (128 * 4)
+
+#define MAX_EDID_BLOCK 3
+#define EDID_TRY_CNT 3
+#define SUPPORT_PIXEL_CLOCK 300000
+
+struct s_edid_data {
+ int edid_block_num;
+ u8 edid_raw_data[FOUR_BLOCK_SIZE];
+};
+
+/***************** Display End *****************/
+
+struct anx7625_platform_data {
+ struct gpio_desc *gpio_p_on;
+ struct gpio_desc *gpio_reset;
+ struct drm_bridge *panel_bridge;
+ int intp_irq;
+ u32 low_power_mode;
+ struct device_node *mipi_host_node;
+};
+
+struct anx7625_i2c_client {
+ struct i2c_client *tx_p0_client;
+ struct i2c_client *tx_p1_client;
+ struct i2c_client *tx_p2_client;
+ struct i2c_client *rx_p0_client;
+ struct i2c_client *rx_p1_client;
+ struct i2c_client *rx_p2_client;
+ struct i2c_client *tcpc_client;
+};
+
+struct anx7625_data {
+ struct anx7625_platform_data pdata;
+ atomic_t power_status;
+ int hpd_status;
+ int hpd_high_cnt;
+ /* Lock for work queue */
+ struct mutex lock;
+ struct i2c_client *client;
+ struct anx7625_i2c_client i2c;
+ struct i2c_client *last_client;
+ struct s_edid_data slimport_edid_p;
+ struct work_struct work;
+ struct workqueue_struct *workqueue;
+ char edid_block;
+ struct display_timing dt;
+ u8 display_timing_valid;
+ struct drm_bridge bridge;
+ u8 bridge_attached;
+ struct mipi_dsi_device *dsi;
+};
+
+#endif /* __ANX7625_H__ */
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
index 9fef6413741d..feb04f127b55 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c
@@ -170,7 +170,7 @@ static int dw_hdmi_i2s_hook_plugged_cb(struct device *dev, void *data,
return dw_hdmi_set_plugged_cb(hdmi, fn, codec_dev);
}
-static struct hdmi_codec_ops dw_hdmi_i2s_ops = {
+static const struct hdmi_codec_ops dw_hdmi_i2s_ops = {
.hw_params = dw_hdmi_i2s_hw_params,
.audio_startup = dw_hdmi_i2s_audio_startup,
.audio_shutdown = dw_hdmi_i2s_audio_shutdown,
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index d89394bc5aa4..c1e35bdf9232 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -153,9 +153,10 @@ static const char * const tc358764_supplies[] = {
struct tc358764 {
struct device *dev;
struct drm_bridge bridge;
+ struct drm_connector connector;
struct regulator_bulk_data supplies[ARRAY_SIZE(tc358764_supplies)];
struct gpio_desc *gpio_reset;
- struct drm_bridge *panel_bridge;
+ struct drm_panel *panel;
int error;
};
@@ -209,6 +210,12 @@ static inline struct tc358764 *bridge_to_tc358764(struct drm_bridge *bridge)
return container_of(bridge, struct tc358764, bridge);
}
+static inline
+struct tc358764 *connector_to_tc358764(struct drm_connector *connector)
+{
+ return container_of(connector, struct tc358764, connector);
+}
+
static int tc358764_init(struct tc358764 *ctx)
{
u32 v = 0;
@@ -271,11 +278,43 @@ static void tc358764_reset(struct tc358764 *ctx)
usleep_range(1000, 2000);
}
+static int tc358764_get_modes(struct drm_connector *connector)
+{
+ struct tc358764 *ctx = connector_to_tc358764(connector);
+
+ return drm_panel_get_modes(ctx->panel, connector);
+}
+
+static const
+struct drm_connector_helper_funcs tc358764_connector_helper_funcs = {
+ .get_modes = tc358764_get_modes,
+};
+
+static const struct drm_connector_funcs tc358764_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static void tc358764_disable(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ int ret = drm_panel_disable(bridge_to_tc358764(bridge)->panel);
+
+ if (ret < 0)
+ dev_err(ctx->dev, "error disabling panel (%d)\n", ret);
+}
+
static void tc358764_post_disable(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
int ret;
+ ret = drm_panel_unprepare(ctx->panel);
+ if (ret < 0)
+ dev_err(ctx->dev, "error unpreparing panel (%d)\n", ret);
tc358764_reset(ctx);
usleep_range(10000, 15000);
ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
@@ -296,28 +335,71 @@ static void tc358764_pre_enable(struct drm_bridge *bridge)
ret = tc358764_init(ctx);
if (ret < 0)
dev_err(ctx->dev, "error initializing bridge (%d)\n", ret);
+ ret = drm_panel_prepare(ctx->panel);
+ if (ret < 0)
+ dev_err(ctx->dev, "error preparing panel (%d)\n", ret);
+}
+
+static void tc358764_enable(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ int ret = drm_panel_enable(ctx->panel);
+
+ if (ret < 0)
+ dev_err(ctx->dev, "error enabling panel (%d)\n", ret);
}
static int tc358764_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ struct drm_device *drm = bridge->dev;
+ int ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
+ ctx->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ ret = drm_connector_init(drm, &ctx->connector,
+ &tc358764_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(&ctx->connector,
+ &tc358764_connector_helper_funcs);
+ drm_connector_attach_encoder(&ctx->connector, bridge->encoder);
+ ctx->connector.funcs->reset(&ctx->connector);
+ drm_connector_register(&ctx->connector);
+
+ return 0;
+}
+
+static void tc358764_detach(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
- return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
- bridge, flags);
+ drm_connector_unregister(&ctx->connector);
+ ctx->panel = NULL;
+ drm_connector_put(&ctx->connector);
}
static const struct drm_bridge_funcs tc358764_bridge_funcs = {
+ .disable = tc358764_disable,
.post_disable = tc358764_post_disable,
+ .enable = tc358764_enable,
.pre_enable = tc358764_pre_enable,
.attach = tc358764_attach,
+ .detach = tc358764_detach,
};
static int tc358764_parse_dt(struct tc358764 *ctx)
{
- struct drm_bridge *panel_bridge;
struct device *dev = ctx->dev;
- struct drm_panel *panel;
int ret;
ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
@@ -326,16 +408,12 @@ static int tc358764_parse_dt(struct tc358764 *ctx)
return PTR_ERR(ctx->gpio_reset);
}
- ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
- if (ret)
- return ret;
-
- panel_bridge = devm_drm_panel_bridge_add(dev, panel);
- if (IS_ERR(panel_bridge))
- return PTR_ERR(panel_bridge);
+ ret = drm_of_find_panel_or_bridge(ctx->dev->of_node, 1, 0, &ctx->panel,
+ NULL);
+ if (ret && ret != -EPROBE_DEFER)
+ dev_err(dev, "cannot find panel (%d)\n", ret);
- ctx->panel_bridge = panel_bridge;
- return 0;
+ return ret;
}
static int tc358764_configure_regulators(struct tc358764 *ctx)
@@ -381,7 +459,6 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
return ret;
ctx->bridge.funcs = &tc358764_bridge_funcs;
- ctx->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index ecdf9b01340f..6ca1debd0f88 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -106,6 +106,8 @@
#define SN_NUM_GPIOS 4
#define SN_GPIO_PHYSICAL_OFFSET 1
+#define SN_LINK_TRAINING_TRIES 10
+
/**
* struct ti_sn_bridge - Platform data for ti-sn65dsi86 driver.
* @dev: Pointer to our device.
@@ -673,6 +675,7 @@ static int ti_sn_link_training(struct ti_sn_bridge *pdata, int dp_rate_idx,
{
unsigned int val;
int ret;
+ int i;
/* set dp clk frequency value */
regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
@@ -689,19 +692,34 @@ static int ti_sn_link_training(struct ti_sn_bridge *pdata, int dp_rate_idx,
goto exit;
}
- /* Semi auto link training mode */
- regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
- ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
- val == ML_TX_MAIN_LINK_OFF ||
- val == ML_TX_NORMAL_MODE, 1000,
- 500 * 1000);
- if (ret) {
- *last_err_str = "Training complete polling failed";
- } else if (val == ML_TX_MAIN_LINK_OFF) {
- *last_err_str = "Link training failed, link is off";
- ret = -EIO;
+ /*
+ * We'll try to link train several times. As part of link training
+ * the bridge chip will write DP_SET_POWER_D0 to DP_SET_POWER. If
+ * the panel isn't quite ready it might respond with a NAK here,
+ * which means we need to try again.
+ */
+ for (i = 0; i < SN_LINK_TRAINING_TRIES; i++) {
+ /* Semi auto link training mode */
+ regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
+ ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
+ val == ML_TX_MAIN_LINK_OFF ||
+ val == ML_TX_NORMAL_MODE, 1000,
+ 500 * 1000);
+ if (ret) {
+ *last_err_str = "Training complete polling failed";
+ } else if (val == ML_TX_MAIN_LINK_OFF) {
+ *last_err_str = "Link training failed, link is off";
+ ret = -EIO;
+ continue;
+ }
+
+ break;
}
+ /* If we saw quite a few retries, add a note about it */
+ if (!ret && i > SN_LINK_TRAINING_TRIES / 2)
+ DRM_DEV_INFO(pdata->dev, "Link training needed %d retries\n", i);
+
exit:
/* Disable the PLL if we failed */
if (ret)
@@ -816,8 +834,7 @@ static void ti_sn_bridge_post_disable(struct drm_bridge *bridge)
{
struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
- if (pdata->refclk)
- clk_disable_unprepare(pdata->refclk);
+ clk_disable_unprepare(pdata->refclk);
pm_runtime_put_sync(pdata->dev);
}
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 58527f151984..b2d20eb6c807 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -281,6 +281,10 @@ EXPORT_SYMBOL(__drm_atomic_state_free);
* needed. It will also grab the relevant CRTC lock to make sure that the state
* is consistent.
*
+ * WARNING: Drivers may only add new CRTC states to a @state if
+ * drm_atomic_state.allow_modeset is set, or if it's a driver-internal commit
+ * not created by userspace through an IOCTL call.
+ *
* Returns:
*
* Either the allocated state or the error code encoded into the pointer. When
@@ -1262,10 +1266,15 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
struct drm_crtc_state *new_crtc_state;
struct drm_connector *conn;
struct drm_connector_state *conn_state;
+ unsigned requested_crtc = 0;
+ unsigned affected_crtc = 0;
int i, ret = 0;
DRM_DEBUG_ATOMIC("checking %p\n", state);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
+ requested_crtc |= drm_crtc_mask(crtc);
+
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
if (ret) {
@@ -1313,6 +1322,26 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
+ affected_crtc |= drm_crtc_mask(crtc);
+
+ /*
+ * For commits that allow modesets drivers can add other CRTCs to the
+ * atomic commit, e.g. when they need to reallocate global resources.
+ * This can cause spurious EBUSY, which robs compositors of a very
+ * effective sanity check for their drawing loop. Therefore only allow
+ * drivers to add unrelated CRTC states for modeset commits.
+ *
+ * FIXME: Should add affected_crtc mask to the ATOMIC IOCTL as an output
+ * so compositors know what's going on.
+ */
+ if (affected_crtc != requested_crtc) {
+ DRM_DEBUG_ATOMIC("driver added CRTC to commit: requested 0x%x, affected 0x%0x\n",
+ requested_crtc, affected_crtc);
+ WARN(!state->allow_modeset, "adding CRTC not allowed without modesets: requested 0x%x, affected 0x%0x\n",
+ requested_crtc, affected_crtc);
+ }
+
return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);
@@ -1613,11 +1642,11 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
* to dmesg in case of error irq's. (Hint, you probably want to
* ratelimit this!)
*
- * The caller must drm_modeset_lock_all(), or if this is called
- * from error irq handler, it should not be enabled by default.
- * (Ie. if you are debugging errors you might not care that this
- * is racey. But calling this without all modeset locks held is
- * not inherently safe.)
+ * The caller must wrap this in drm_modeset_lock_all_ctx() and
+ * drm_modeset_drop_locks(). If this is called from an error irq handler, it
+ * should not be enabled by default - if you are debugging errors you might
+ * not care that this is racy, but calling this without all modeset locks held
+ * is inherently unsafe.
*/
void drm_state_dump(struct drm_device *dev, struct drm_printer *p)
{
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index f9170b4b22e7..a7bcb4b4586c 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1093,7 +1093,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
if (new_crtc_state->enable && funcs->prepare)
funcs->prepare(crtc);
else if (funcs->atomic_disable)
- funcs->atomic_disable(crtc, old_crtc_state);
+ funcs->atomic_disable(crtc, old_state);
else if (funcs->disable)
funcs->disable(crtc);
else if (funcs->dpms)
@@ -1358,7 +1358,7 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
DRM_DEBUG_ATOMIC("enabling [CRTC:%d:%s]\n",
crtc->base.id, crtc->name);
if (funcs->atomic_enable)
- funcs->atomic_enable(crtc, old_crtc_state);
+ funcs->atomic_enable(crtc, old_state);
else if (funcs->commit)
funcs->commit(crtc);
}
@@ -1736,8 +1736,11 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
* overridden by a previous synchronous update's state.
*/
if (old_plane_state->commit &&
- !try_wait_for_completion(&old_plane_state->commit->hw_done))
+ !try_wait_for_completion(&old_plane_state->commit->hw_done)) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] inflight previous commit preventing async commit\n",
+ plane->base.id, plane->name);
return -EBUSY;
+ }
return funcs->atomic_async_check(plane, new_plane_state);
}
@@ -1955,6 +1958,9 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock)
* commit with nonblocking ones. */
if (!completed && nonblock) {
spin_unlock(&crtc->commit_lock);
+ DRM_DEBUG_ATOMIC("[CRTC:%d:%s] busy with a previous commit\n",
+ crtc->base.id, crtc->name);
+
return -EBUSY;
}
} else if (i == 1) {
@@ -2129,8 +2135,12 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
/* Userspace is not allowed to get ahead of the previous
* commit with nonblocking ones. */
if (nonblock && old_conn_state->commit &&
- !try_wait_for_completion(&old_conn_state->commit->flip_done))
+ !try_wait_for_completion(&old_conn_state->commit->flip_done)) {
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] busy with a previous commit\n",
+ conn->base.id, conn->name);
+
return -EBUSY;
+ }
/* Always track connectors explicitly for e.g. link retraining. */
commit = crtc_or_fake_commit(state, new_conn_state->crtc ?: old_conn_state->crtc);
@@ -2144,8 +2154,12 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
/* Userspace is not allowed to get ahead of the previous
* commit with nonblocking ones. */
if (nonblock && old_plane_state->commit &&
- !try_wait_for_completion(&old_plane_state->commit->flip_done))
+ !try_wait_for_completion(&old_plane_state->commit->flip_done)) {
+ DRM_DEBUG_ATOMIC("[PLANE:%d:%s] busy with a previous commit\n",
+ plane->base.id, plane->name);
+
return -EBUSY;
+ }
/* Always track planes explicitly for async pageflip support. */
commit = crtc_or_fake_commit(state, new_plane_state->crtc ?: old_plane_state->crtc);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 717c4e7271b0..1913d8b4e16a 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -960,6 +960,11 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
* drm_connector_update_edid_property(), usually after having parsed
* the EDID using drm_add_edid_modes(). Userspace cannot change this
* property.
+ *
+ * User-space should not parse the EDID to obtain information exposed via
+ * other KMS properties (because the kernel might apply limits, quirks or
+ * fixups to the EDID). For instance, user-space should not try to parse
+ * mode lists from the EDID.
* DPMS:
* Legacy property for setting the power state of the connector. For atomic
* drivers this is only provided for backwards compatibility with existing
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 2510717d5a08..e25181bf2c48 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -63,7 +63,7 @@ static struct drm_dp_aux_dev *drm_dp_aux_dev_get_by_minor(unsigned index)
mutex_lock(&aux_idr_mutex);
aux_dev = idr_find(&aux_idr, index);
- if (!kref_get_unless_zero(&aux_dev->refcount))
+ if (aux_dev && !kref_get_unless_zero(&aux_dev->refcount))
aux_dev = NULL;
mutex_unlock(&aux_idr_mutex);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index e87542533640..153b6065ba29 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3686,9 +3686,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
WARN_ON(mgr->mst_primary);
/* get dpcd info */
- ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
- if (ret != DP_RECEIVER_CAP_SIZE) {
- DRM_DEBUG_KMS("failed to read DPCD\n");
+ ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
+ if (ret < 0) {
+ drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
+ mgr->aux->name, ret);
goto out_unlock;
}
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 1543d9d10970..92e0db30fdf7 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -281,18 +281,12 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
#ifdef CONFIG_MAGIC_SYSRQ
-/*
- * restore fbcon display for all kms driver's using this helper, used for sysrq
- * and panic handling.
- */
-static bool drm_fb_helper_force_kernel_mode(void)
+/* emergency restore, don't bother with error reporting */
+static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
- bool ret, error = false;
struct drm_fb_helper *helper;
- if (list_empty(&kernel_fb_helper_list))
- return false;
-
+ mutex_lock(&kernel_fb_helper_lock);
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
struct drm_device *dev = helper->dev;
@@ -300,22 +294,12 @@ static bool drm_fb_helper_force_kernel_mode(void)
continue;
mutex_lock(&helper->lock);
- ret = drm_client_modeset_commit_locked(&helper->client);
- if (ret)
- error = true;
+ drm_client_modeset_commit_locked(&helper->client);
mutex_unlock(&helper->lock);
}
- return error;
+ mutex_unlock(&kernel_fb_helper_lock);
}
-static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
-{
- bool ret;
-
- ret = drm_fb_helper_force_kernel_mode();
- if (ret == true)
- DRM_ERROR("Failed to restore crtc configuration\n");
-}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
static void drm_fb_helper_sysrq(int dummy1)
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 722c7ebe4e88..03262472059c 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -202,6 +202,7 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_AXBXGXRX106106106106, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 69c2c079d803..d586068f5509 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -247,12 +247,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
{
struct drm_file *file_priv = data;
struct drm_gem_object *obj = ptr;
- struct drm_device *dev = obj->dev;
- if (obj->funcs && obj->funcs->close)
+ if (obj->funcs->close)
obj->funcs->close(obj, file_priv);
- else if (dev->driver->gem_close_object)
- dev->driver->gem_close_object(obj, file_priv);
drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv);
@@ -403,14 +400,10 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
if (ret)
goto err_remove;
- if (obj->funcs && obj->funcs->open) {
+ if (obj->funcs->open) {
ret = obj->funcs->open(obj, file_priv);
if (ret)
goto err_revoke;
- } else if (dev->driver->gem_open_object) {
- ret = dev->driver->gem_open_object(obj, file_priv);
- if (ret)
- goto err_revoke;
}
*handlep = handle;
@@ -982,12 +975,11 @@ drm_gem_object_free(struct kref *kref)
{
struct drm_gem_object *obj =
container_of(kref, struct drm_gem_object, refcount);
- struct drm_device *dev = obj->dev;
- if (obj->funcs)
- obj->funcs->free(obj);
- else if (dev->driver->gem_free_object_unlocked)
- dev->driver->gem_free_object_unlocked(obj);
+ if (WARN_ON(!obj->funcs->free))
+ return;
+
+ obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
@@ -1049,9 +1041,9 @@ EXPORT_SYMBOL(drm_gem_vm_close);
* @obj_size: the object size to be mapped, in bytes
* @vma: VMA for the area to be mapped
*
- * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
- * provided by the driver. Depending on their requirements, drivers can either
- * provide a fault handler in their gem_vm_ops (in which case any accesses to
+ * Set up the VMA to prepare mapping of the GEM object using the GEM object's
+ * vm_ops. Depending on their requirements, GEM objects can either
+ * provide a fault handler in their vm_ops (in which case any accesses to
* the object will be trapped, to perform migration, GTT binding, surface
* register allocation, or performance monitoring), or mmap the buffer memory
* synchronously after calling drm_gem_mmap_obj.
@@ -1065,12 +1057,11 @@ EXPORT_SYMBOL(drm_gem_vm_close);
* callers must verify access restrictions before calling this helper.
*
* Returns 0 on success or -EINVAL if the object size is smaller than the VMA
- * size, or if no gem_vm_ops are provided.
+ * size, or if no vm_ops are provided.
*/
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
struct vm_area_struct *vma)
{
- struct drm_device *dev = obj->dev;
int ret;
/* Check for valid size. */
@@ -1087,7 +1078,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
vma->vm_private_data = obj;
- if (obj->funcs && obj->funcs->mmap) {
+ if (obj->funcs->mmap) {
ret = obj->funcs->mmap(obj, vma);
if (ret) {
drm_gem_object_put(obj);
@@ -1095,10 +1086,8 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
}
WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
} else {
- if (obj->funcs && obj->funcs->vm_ops)
+ if (obj->funcs->vm_ops)
vma->vm_ops = obj->funcs->vm_ops;
- else if (dev->driver->gem_vm_ops)
- vma->vm_ops = dev->driver->gem_vm_ops;
else {
drm_gem_object_put(obj);
return -EINVAL;
@@ -1198,36 +1187,30 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
drm_printf_indent(p, indent, "imported=%s\n",
obj->import_attach ? "yes" : "no");
- if (obj->funcs && obj->funcs->print_info)
+ if (obj->funcs->print_info)
obj->funcs->print_info(p, indent, obj);
}
int drm_gem_pin(struct drm_gem_object *obj)
{
- if (obj->funcs && obj->funcs->pin)
+ if (obj->funcs->pin)
return obj->funcs->pin(obj);
- else if (obj->dev->driver->gem_prime_pin)
- return obj->dev->driver->gem_prime_pin(obj);
else
return 0;
}
void drm_gem_unpin(struct drm_gem_object *obj)
{
- if (obj->funcs && obj->funcs->unpin)
+ if (obj->funcs->unpin)
obj->funcs->unpin(obj);
- else if (obj->dev->driver->gem_prime_unpin)
- obj->dev->driver->gem_prime_unpin(obj);
}
void *drm_gem_vmap(struct drm_gem_object *obj)
{
void *vaddr;
- if (obj->funcs && obj->funcs->vmap)
+ if (obj->funcs->vmap)
vaddr = obj->funcs->vmap(obj);
- else if (obj->dev->driver->gem_prime_vmap)
- vaddr = obj->dev->driver->gem_prime_vmap(obj);
else
vaddr = ERR_PTR(-EOPNOTSUPP);
@@ -1242,10 +1225,8 @@ void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
if (!vaddr)
return;
- if (obj->funcs && obj->funcs->vunmap)
+ if (obj->funcs->vunmap)
obj->funcs->vunmap(obj, vaddr);
- else if (obj->dev->driver->gem_prime_vunmap)
- obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}
/**
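With the driver-level GEM callbacks gone, drm_gem.c can rely on obj->funcs being non-NULL, which means every driver must attach a struct drm_gem_object_funcs to each object before drm_gem_object_init(). A hedged sketch of the per-object setup that the driver conversions below follow (all my_* names are illustrative):

#include <linux/err.h>
#include <linux/slab.h>

#include <drm/drm_gem.h>

static void my_gem_free(struct drm_gem_object *obj)
{
	drm_gem_object_release(obj);
	kfree(obj);
}

static const struct vm_operations_struct my_gem_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

static const struct drm_gem_object_funcs my_gem_object_funcs = {
	.free = my_gem_free,
	.vm_ops = &my_gem_vm_ops,
};

static struct drm_gem_object *my_gem_create(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;
	int ret;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	/* must be set before the object is used; there is no fallback anymore */
	obj->funcs = &my_gem_object_funcs;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret) {
		kfree(obj);
		return ERR_PTR(ret);
	}

	return obj;
}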
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 59b9ca207b42..2165633c9b9e 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -171,17 +171,16 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
* GEM object state and frees the memory used to store the object itself.
* If the buffer is imported and the virtual address is set, it is released.
* Drivers using the CMA helpers should set this as their
- * &drm_driver.gem_free_object_unlocked callback.
+ * &drm_gem_object_funcs.free callback.
*/
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
- struct drm_gem_cma_object *cma_obj;
-
- cma_obj = to_drm_gem_cma_obj(gem_obj);
+ struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj);
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(cma_obj->vaddr);
if (gem_obj->import_attach) {
if (cma_obj->vaddr)
- dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
+ dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
} else if (cma_obj->vaddr) {
dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
@@ -419,7 +418,7 @@ EXPORT_SYMBOL(drm_gem_cma_print_info);
*
* This function exports a scatter/gather table suitable for PRIME usage by
* calling the standard DMA mapping API. Drivers using the CMA helpers should
- * set this as their &drm_driver.gem_prime_get_sg_table callback.
+ * set this as their &drm_gem_object_funcs.get_sg_table callback.
*
* Returns:
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
@@ -525,7 +524,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
* virtual address space. Since the CMA buffers are already mapped into the
* kernel virtual address space this simply returns the cached virtual
* address. Drivers using the CMA helpers should set this as their DRM
- * driver's &drm_driver.gem_prime_vmap callback.
+ * driver's &drm_gem_object_funcs.vmap callback.
*
* Returns:
* The kernel virtual address of the CMA GEM object's backing store.
@@ -547,7 +546,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
* This function removes a buffer exported via DRM PRIME from the kernel's
* virtual address space. This is a no-op because CMA buffers cannot be
* unmapped from kernel space. Drivers using the CMA helpers should set this
- * as their &drm_driver.gem_prime_vunmap callback.
+ * as their &drm_gem_object_funcs.vunmap callback.
*/
void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
@@ -617,22 +616,23 @@ drm_gem_cma_prime_import_sg_table_vmap(struct drm_device *dev,
{
struct drm_gem_cma_object *cma_obj;
struct drm_gem_object *obj;
- void *vaddr;
+ struct dma_buf_map map;
+ int ret;
- vaddr = dma_buf_vmap(attach->dmabuf);
- if (!vaddr) {
+ ret = dma_buf_vmap(attach->dmabuf, &map);
+ if (ret) {
DRM_ERROR("Failed to vmap PRIME buffer\n");
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(ret);
}
obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj)) {
- dma_buf_vunmap(attach->dmabuf, vaddr);
+ dma_buf_vunmap(attach->dmabuf, &map);
return obj;
}
cma_obj = to_drm_gem_cma_obj(obj);
- cma_obj->vaddr = vaddr;
+ cma_obj->vaddr = map.vaddr;
return obj;
}
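The CMA helper is converted to the new dma-buf vmap interface: dma_buf_vmap() now returns an errno code and stores the mapping in a struct dma_buf_map, and dma_buf_vunmap() takes the same structure back. A minimal sketch of the calling convention for an imported buffer (my_* names are hypothetical):

#include <linux/dma-buf.h>
#include <linux/dma-buf-map.h>

static void *my_vmap_imported(struct dma_buf *dmabuf)
{
	struct dma_buf_map map;

	/* returns 0 on success, a negative errno code otherwise */
	if (dma_buf_vmap(dmabuf, &map))
		return NULL;

	/* map.vaddr is only meaningful for system-memory mappings */
	return map.vaddr;
}

static void my_vunmap_imported(struct dma_buf *dmabuf, void *vaddr)
{
	struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(vaddr);

	dma_buf_vunmap(dmabuf, &map);
}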
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index e00616d94f26..8233bda4692f 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -261,13 +261,16 @@ EXPORT_SYMBOL(drm_gem_shmem_unpin);
static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- int ret;
+ struct dma_buf_map map;
+ int ret = 0;
if (shmem->vmap_use_count++ > 0)
return shmem->vaddr;
if (obj->import_attach) {
- shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+ ret = dma_buf_vmap(obj->import_attach->dmabuf, &map);
+ if (!ret)
+ shmem->vaddr = map.vaddr;
} else {
pgprot_t prot = PAGE_KERNEL;
@@ -279,11 +282,12 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
+ if (!shmem->vaddr)
+ ret = -ENOMEM;
}
- if (!shmem->vaddr) {
- DRM_DEBUG_KMS("Failed to vmap pages\n");
- ret = -ENOMEM;
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
goto err_put_pages;
}
@@ -333,6 +337,7 @@ EXPORT_SYMBOL(drm_gem_shmem_vmap);
static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(shmem->vaddr);
if (WARN_ON_ONCE(!shmem->vmap_use_count))
return;
@@ -341,7 +346,7 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
return;
if (obj->import_attach)
- dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
+ dma_buf_vunmap(obj->import_attach->dmabuf, &map);
else
vunmap(shmem->vaddr);
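Besides the dma_buf_map conversion, the shmem helper keeps its reference-counted vmap: the first caller creates the mapping, later callers just bump vmap_use_count, and the mapping is torn down when the count drops back to zero. A minimal sketch of that pattern (my_obj is hypothetical; the caller is assumed to hold the object lock):

#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

struct my_obj {
	struct page **pages;
	unsigned int num_pages;
	unsigned int vmap_use_count;
	void *vaddr;
};

static void *my_obj_vmap_locked(struct my_obj *obj)
{
	/* already mapped: just take another reference */
	if (obj->vmap_use_count++ > 0)
		return obj->vaddr;

	obj->vaddr = vmap(obj->pages, obj->num_pages, VM_MAP, PAGE_KERNEL);
	if (!obj->vaddr)
		obj->vmap_use_count = 0;

	return obj->vaddr;
}

static void my_obj_vunmap_locked(struct my_obj *obj)
{
	if (WARN_ON_ONCE(!obj->vmap_use_count))
		return;
	if (--obj->vmap_use_count > 0)
		return;		/* mapping still in use elsewhere */

	vunmap(obj->vaddr);
	obj->vaddr = NULL;
}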
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 50cad0e4a92e..9da823eb0edd 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -140,22 +140,19 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
unsigned int c = 0;
if (pl_flag & DRM_GEM_VRAM_PL_FLAG_TOPDOWN)
- pl_flag = TTM_PL_FLAG_TOPDOWN;
+ invariant_flags = TTM_PL_FLAG_TOPDOWN;
gbo->placement.placement = gbo->placements;
gbo->placement.busy_placement = gbo->placements;
if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) {
gbo->placements[c].mem_type = TTM_PL_VRAM;
- gbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED |
- invariant_flags;
+ gbo->placements[c++].flags = invariant_flags;
}
if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) {
gbo->placements[c].mem_type = TTM_PL_SYSTEM;
- gbo->placements[c++].flags = TTM_PL_MASK_CACHING |
- invariant_flags;
+ gbo->placements[c++].flags = invariant_flags;
}
gbo->placement.num_placement = c;
@@ -167,58 +164,18 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo,
}
}
-/*
- * Note that on error, drm_gem_vram_init will free the buffer object.
- */
-
-static int drm_gem_vram_init(struct drm_device *dev,
- struct drm_gem_vram_object *gbo,
- size_t size, unsigned long pg_align)
-{
- struct drm_vram_mm *vmm = dev->vram_mm;
- struct ttm_bo_device *bdev;
- int ret;
- size_t acc_size;
-
- if (WARN_ONCE(!vmm, "VRAM MM not initialized")) {
- kfree(gbo);
- return -EINVAL;
- }
- bdev = &vmm->bdev;
-
- gbo->bo.base.funcs = &drm_gem_vram_object_funcs;
-
- ret = drm_gem_object_init(dev, &gbo->bo.base, size);
- if (ret) {
- kfree(gbo);
- return ret;
- }
-
- acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
-
- gbo->bo.bdev = bdev;
- drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
- DRM_GEM_VRAM_PL_FLAG_SYSTEM);
-
- ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
- &gbo->placement, pg_align, false, acc_size,
- NULL, NULL, ttm_buffer_object_destroy);
- if (ret)
- /*
- * A failing ttm_bo_init will call ttm_buffer_object_destroy
- * to release gbo->bo.base and kfree gbo.
- */
- return ret;
-
- return 0;
-}
-
/**
* drm_gem_vram_create() - Creates a VRAM-backed GEM object
* @dev: the DRM device
* @size: the buffer size in bytes
* @pg_align: the buffer's alignment in multiples of the page size
*
+ * GEM objects are allocated by calling struct drm_driver.gem_create_object,
+ * if set. Otherwise kzalloc() will be used. Drivers can set their own GEM
+ * object functions on the object returned by struct drm_driver.gem_create_object.
+ * If no functions are set, the new GEM object will use the default functions
+ * from the GEM VRAM helpers.
+ *
* Returns:
* A new instance of &struct drm_gem_vram_object on success, or
* an ERR_PTR()-encoded error code otherwise.
@@ -228,11 +185,17 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
unsigned long pg_align)
{
struct drm_gem_vram_object *gbo;
+ struct drm_gem_object *gem;
+ struct drm_vram_mm *vmm = dev->vram_mm;
+ struct ttm_bo_device *bdev;
int ret;
+ size_t acc_size;
+
+ if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
+ return ERR_PTR(-EINVAL);
if (dev->driver->gem_create_object) {
- struct drm_gem_object *gem =
- dev->driver->gem_create_object(dev, size);
+ gem = dev->driver->gem_create_object(dev, size);
if (!gem)
return ERR_PTR(-ENOMEM);
gbo = drm_gem_vram_of_gem(gem);
@@ -240,10 +203,32 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
gbo = kzalloc(sizeof(*gbo), GFP_KERNEL);
if (!gbo)
return ERR_PTR(-ENOMEM);
+ gem = &gbo->bo.base;
}
- ret = drm_gem_vram_init(dev, gbo, size, pg_align);
- if (ret < 0)
+ if (!gem->funcs)
+ gem->funcs = &drm_gem_vram_object_funcs;
+
+ ret = drm_gem_object_init(dev, gem, size);
+ if (ret) {
+ kfree(gbo);
+ return ERR_PTR(ret);
+ }
+
+ bdev = &vmm->bdev;
+ acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
+
+ gbo->bo.bdev = bdev;
+ drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
+
+ /*
+ * A failing ttm_bo_init will call ttm_buffer_object_destroy
+ * to release gbo->bo.base and kfree gbo.
+ */
+ ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
+ &gbo->placement, pg_align, false, acc_size,
+ NULL, NULL, ttm_buffer_object_destroy);
+ if (ret)
return ERR_PTR(ret);
return gbo;
@@ -301,7 +286,7 @@ static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
*/
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo)
{
- if (WARN_ON_ONCE(!gbo->pin_count))
+ if (WARN_ON_ONCE(!gbo->bo.pin_count))
return (s64)-ENODEV;
return drm_gem_vram_pg_offset(gbo) << PAGE_SHIFT;
}
@@ -310,24 +295,21 @@ EXPORT_SYMBOL(drm_gem_vram_offset);
static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
unsigned long pl_flag)
{
- int i, ret;
struct ttm_operation_ctx ctx = { false, false };
+ int ret;
- if (gbo->pin_count)
+ if (gbo->bo.pin_count)
goto out;
if (pl_flag)
drm_gem_vram_placement(gbo, pl_flag);
- for (i = 0; i < gbo->placement.num_placement; ++i)
- gbo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-
ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
if (ret < 0)
return ret;
out:
- ++gbo->pin_count;
+ ttm_bo_pin(&gbo->bo);
return 0;
}
@@ -369,26 +351,9 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
}
EXPORT_SYMBOL(drm_gem_vram_pin);
-static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
+static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
- int i, ret;
- struct ttm_operation_ctx ctx = { false, false };
-
- if (WARN_ON_ONCE(!gbo->pin_count))
- return 0;
-
- --gbo->pin_count;
- if (gbo->pin_count)
- return 0;
-
- for (i = 0; i < gbo->placement.num_placement ; ++i)
- gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-
- ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx);
- if (ret < 0)
- return ret;
-
- return 0;
+ ttm_bo_unpin(&gbo->bo);
}
/**
@@ -406,10 +371,11 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
ret = ttm_bo_reserve(&gbo->bo, true, false, NULL);
if (ret)
return ret;
- ret = drm_gem_vram_unpin_locked(gbo);
+
+ drm_gem_vram_unpin_locked(gbo);
ttm_bo_unreserve(&gbo->bo);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(drm_gem_vram_unpin);
@@ -619,6 +585,23 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
kmap->virtual = NULL;
}
+static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
+ bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem)
+{
+ int ret;
+
+ drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
+ ret = ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem);
+ if (ret) {
+ swap(*new_mem, gbo->bo.mem);
+ drm_gem_vram_bo_driver_move_notify(gbo, false, new_mem);
+ swap(*new_mem, gbo->bo.mem);
+ }
+ return ret;
+}
+
/*
* Helpers for struct drm_gem_object_funcs
*/
@@ -941,7 +924,7 @@ static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo,
if (!tt)
return NULL;
- ret = ttm_tt_init(tt, bo, page_flags);
+ ret = ttm_tt_init(tt, bo, page_flags, ttm_cached);
if (ret < 0)
goto err_ttm_tt_init;
@@ -966,9 +949,7 @@ static void bo_driver_evict_flags(struct ttm_buffer_object *bo,
drm_gem_vram_bo_driver_evict_flags(gbo, placement);
}
-static void bo_driver_move_notify(struct ttm_buffer_object *bo,
- bool evict,
- struct ttm_resource *new_mem)
+static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_gem_vram_object *gbo;
@@ -978,7 +959,19 @@ static void bo_driver_move_notify(struct ttm_buffer_object *bo,
gbo = drm_gem_vram_of_bo(bo);
- drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
+ drm_gem_vram_bo_driver_move_notify(gbo, false, NULL);
+}
+
+static int bo_driver_move(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem)
+{
+ struct drm_gem_vram_object *gbo;
+
+ gbo = drm_gem_vram_of_bo(bo);
+
+ return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
}
static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -992,6 +985,7 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
case TTM_PL_VRAM:
mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base;
mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_write_combined;
break;
default:
return -EINVAL;
@@ -1005,7 +999,8 @@ static struct ttm_bo_driver bo_driver = {
.ttm_tt_destroy = bo_driver_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = bo_driver_evict_flags,
- .move_notify = bo_driver_move_notify,
+ .move = bo_driver_move,
+ .delete_mem_notify = bo_driver_delete_mem_notify,
.io_mem_reserve = bo_driver_io_mem_reserve,
};
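The VRAM helper drops the TTM_PL_FLAG_NO_EVICT dance (setting and clearing the flag on every placement, then revalidating) in favor of TTM's new explicit pin interface. A hedged sketch of the pin/unpin sequence under BO reservation (my_* names are illustrative):

#include <drm/ttm/ttm_bo_api.h>

static int my_bo_pin(struct ttm_buffer_object *bo)
{
	int ret;

	ret = ttm_bo_reserve(bo, true, false, NULL);
	if (ret)
		return ret;

	ttm_bo_pin(bo);		/* bumps bo->pin_count; the BO becomes unevictable */
	ttm_bo_unreserve(bo);

	return 0;
}

static void my_bo_unpin(struct ttm_buffer_object *bo)
{
	if (ttm_bo_reserve(bo, true, false, NULL))
		return;

	ttm_bo_unpin(bo);	/* drops pin_count; evictable again once it hits zero */
	ttm_bo_unreserve(bo);
}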
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index b65865c630b0..2bdac3557765 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -53,15 +53,15 @@ void drm_lastclose(struct drm_device *dev);
#ifdef CONFIG_PCI
/* drm_pci.c */
-int drm_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
void drm_pci_agp_destroy(struct drm_device *dev);
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
#else
-static inline int drm_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
return -EINVAL;
}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 789ee65ac1f5..d273d1a8603a 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -578,7 +578,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_legacy_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, 0),
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index c250fb5a88ca..6dba4b8ce4fe 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -139,7 +139,7 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
}
/**
- * drm_irq_by_busid - Get interrupt from bus ID
+ * drm_legacy_irq_by_busid - Get interrupt from bus ID
* @dev: DRM device
* @data: IOCTL parameter pointing to a drm_irq_busid structure
* @file_priv: DRM file private.
@@ -150,8 +150,8 @@ static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
*
* Return: 0 on success or a negative error code on failure.
*/
-int drm_irq_by_busid(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_irq_busid *p = data;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 9f955f2010c2..187b55ede62e 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -386,8 +386,6 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
if (obj->funcs && obj->funcs->export)
dmabuf = obj->funcs->export(obj, flags);
- else if (dev->driver->gem_prime_export)
- dmabuf = dev->driver->gem_prime_export(obj, flags);
else
dmabuf = drm_gem_prime_export(obj, flags);
if (IS_ERR(dmabuf)) {
@@ -419,7 +417,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
* This is the PRIME export function which GEM drivers must use to ensure
* correct lifetime management of the underlying GEM object.
* The actual exporting from GEM object to a dma-buf is done through the
- * &drm_driver.gem_prime_export driver callback.
+ * &drm_gem_object_funcs.export callback.
*/
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle,
@@ -622,10 +620,12 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
if (WARN_ON(dir == DMA_NONE))
return ERR_PTR(-EINVAL);
- if (obj->funcs)
- sgt = obj->funcs->get_sg_table(obj);
- else
- sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
+ if (WARN_ON(!obj->funcs->get_sg_table))
+ return ERR_PTR(-ENOSYS);
+
+ sgt = obj->funcs->get_sg_table(obj);
+ if (IS_ERR(sgt))
+ return sgt;
ret = dma_map_sgtable(attach->dev, sgt, dir,
DMA_ATTR_SKIP_CPU_SYNC);
@@ -663,38 +663,41 @@ EXPORT_SYMBOL(drm_gem_unmap_dma_buf);
/**
* drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
* @dma_buf: buffer to be mapped
+ * @map: the dma-buf mapping to store the kernel virtual address in
*
* Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
* callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
*
* Returns 0 on success or a negative errno code otherwise.
*/
-void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
void *vaddr;
vaddr = drm_gem_vmap(obj);
if (IS_ERR(vaddr))
- vaddr = NULL;
+ return PTR_ERR(vaddr);
- return vaddr;
+ dma_buf_map_set_vaddr(map, vaddr);
+
+ return 0;
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);
/**
* drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
* @dma_buf: buffer to be unmapped
- * @vaddr: the virtual address of the buffer
+ * @map: the dma-buf mapping holding the buffer's kernel virtual address
*
* Releases a kernel virtual mapping. This can be used as the
* &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for device specific handling.
*/
-void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_gem_object *obj = dma_buf->priv;
- drm_gem_vunmap(obj, vaddr);
+ drm_gem_vunmap(obj, map->vaddr);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
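Both helpers now follow the dma_buf_map-based dma_buf_ops signatures. A hedged sketch of how a GEM driver might wire them into its exporter ops (the struct name is hypothetical; the helper functions are the ones exported above):

#include <linux/dma-buf.h>

#include <drm/drm_prime.h>

static const struct dma_buf_ops my_gem_dmabuf_ops = {
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,	 /* int (*)(struct dma_buf *, struct dma_buf_map *) */
	.vunmap = drm_gem_dmabuf_vunmap, /* void (*)(struct dma_buf *, struct dma_buf_map *) */
};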
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 74946690aba4..fa87b63e152a 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -99,7 +99,7 @@ static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
}
static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_plane *plane;
struct drm_simple_display_pipe *pipe;
@@ -113,7 +113,7 @@ static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
}
static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_simple_display_pipe *pipe;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index a9a3afaef9a1..aa270b79e585 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -468,12 +468,6 @@ static const struct drm_ioctl_desc etnaviv_ioctls[] = {
ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_RENDER_ALLOW),
};
-static const struct vm_operations_struct vm_ops = {
- .fault = etnaviv_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -490,16 +484,9 @@ static struct drm_driver etnaviv_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
.open = etnaviv_open,
.postclose = etnaviv_postclose,
- .gem_free_object_unlocked = etnaviv_gem_free_object,
- .gem_vm_ops = &vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_pin = etnaviv_gem_prime_pin,
- .gem_prime_unpin = etnaviv_gem_prime_unpin,
- .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
.gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
- .gem_prime_vmap = etnaviv_gem_prime_vmap,
- .gem_prime_vunmap = etnaviv_gem_prime_vunmap,
.gem_prime_mmap = etnaviv_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = etnaviv_debugfs_init,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index 4d8dc9236e5f..914f0867ff71 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -49,7 +49,6 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf);
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index d1533bdc1335..67d9a2b9ea6a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -171,7 +171,7 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return obj->ops->mmap(obj, vma);
}
-vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
+static vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
@@ -559,6 +559,22 @@ void etnaviv_gem_obj_add(struct drm_device *dev, struct drm_gem_object *obj)
mutex_unlock(&priv->gem_lock);
}
+static const struct vm_operations_struct vm_ops = {
+ .fault = etnaviv_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs etnaviv_gem_object_funcs = {
+ .free = etnaviv_gem_free_object,
+ .pin = etnaviv_gem_prime_pin,
+ .unpin = etnaviv_gem_prime_unpin,
+ .get_sg_table = etnaviv_gem_prime_get_sg_table,
+ .vmap = etnaviv_gem_prime_vmap,
+ .vunmap = etnaviv_gem_prime_vunmap,
+ .vm_ops = &vm_ops,
+};
+
static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
const struct etnaviv_gem_ops *ops, struct drm_gem_object **obj)
{
@@ -593,6 +609,7 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
INIT_LIST_HEAD(&etnaviv_obj->vram_list);
*obj = &etnaviv_obj->base;
+ (*obj)->funcs = &etnaviv_gem_object_funcs;
return 0;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 4aa3426a9ba4..135fbff6fecf 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -70,9 +70,10 @@ void etnaviv_gem_prime_unpin(struct drm_gem_object *obj)
static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
{
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(etnaviv_obj->vaddr);
+
if (etnaviv_obj->vaddr)
- dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf,
- etnaviv_obj->vaddr);
+ dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, &map);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
@@ -85,9 +86,15 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj)
{
+ struct dma_buf_map map;
+ int ret;
+
lockdep_assert_held(&etnaviv_obj->lock);
- return dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf);
+ ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map);
+ if (ret)
+ return NULL;
+ return map.vaddr;
}
static int etnaviv_gem_prime_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 1c03485676ef..35f1d1dbb126 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -19,7 +19,7 @@
#include "exynos_drm_plane.h"
static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
@@ -30,7 +30,7 @@ static void exynos_drm_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index dbd80f1e4c78..fe46680ca208 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -75,11 +75,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
file->driver_priv = NULL;
}
-static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
DRM_RENDER_ALLOW),
@@ -124,16 +119,11 @@ static struct drm_driver exynos_drm_driver = {
.open = exynos_drm_open,
.lastclose = drm_fb_helper_lastclose,
.postclose = exynos_drm_postclose,
- .gem_free_object_unlocked = exynos_drm_gem_free_object,
- .gem_vm_ops = &exynos_drm_gem_vm_ops,
.dumb_create = exynos_drm_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = exynos_drm_gem_prime_import,
- .gem_prime_get_sg_table = exynos_drm_gem_prime_get_sg_table,
.gem_prime_import_sg_table = exynos_drm_gem_prime_import_sg_table,
- .gem_prime_vmap = exynos_drm_gem_prime_vmap,
- .gem_prime_vunmap = exynos_drm_gem_prime_vunmap,
.gem_prime_mmap = exynos_drm_gem_prime_mmap,
.ioctls = exynos_ioctls,
.num_ioctls = ARRAY_SIZE(exynos_ioctls),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 7777f19c9d38..4afbf5109cbf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -127,6 +127,19 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
kfree(exynos_gem);
}
+static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs exynos_drm_gem_object_funcs = {
+ .free = exynos_drm_gem_free_object,
+ .get_sg_table = exynos_drm_gem_prime_get_sg_table,
+ .vmap = exynos_drm_gem_prime_vmap,
+ .vunmap = exynos_drm_gem_prime_vunmap,
+ .vm_ops = &exynos_drm_gem_vm_ops,
+};
+
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
@@ -141,6 +154,8 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
exynos_gem->size = size;
obj = &exynos_gem->base;
+ obj->funcs = &exynos_drm_gem_object_funcs;
+
ret = drm_gem_object_init(dev, obj, size);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index b9ca81a6f80f..7a9e89cfdf9c 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -43,8 +43,10 @@ static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
}
static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
@@ -62,7 +64,7 @@ static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void fsl_dcu_drm_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
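fsl-dcu shows the pattern for the new CRTC hook signature: the callbacks now receive the full struct drm_atomic_state, and drivers that still need the old per-CRTC state look it up explicitly. A minimal sketch of the new-style hook (my_crtc_atomic_enable is a hypothetical name):

#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>

static void my_crtc_atomic_enable(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *old_state =
		drm_atomic_get_old_crtc_state(state, crtc);

	/* old_state replaces the old_crtc_state parameter of the previous API */
	if (old_state && old_state->self_refresh_active)
		return;	/* e.g. skip a full power-up when exiting self refresh */
}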
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 720a767118c9..bfd9a15d63b1 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -1501,8 +1501,7 @@ cdv_intel_dp_start_link_train(struct gma_encoder *encoder)
clock_recovery = false;
DRM_DEBUG_KMS("Start train\n");
- reg = DP | DP_LINK_TRAIN_PAT_1;
-
+ reg = DP | DP_LINK_TRAIN_PAT_1;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
@@ -1575,7 +1574,7 @@ cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
cr_tries = 0;
DRM_DEBUG_KMS("\n");
- reg = DP | DP_LINK_TRAIN_PAT_2;
+ reg = DP | DP_LINK_TRAIN_PAT_2;
for (;;) {
@@ -2083,7 +2082,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
DRM_INFO("failed to retrieve link info, disabling eDP\n");
drm_encoder_cleanup(encoder);
cdv_intel_dp_destroy(connector);
- goto err_priv;
+ goto err_connector;
} else {
DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
intel_dp->dpcd[0], intel_dp->dpcd[1],
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 54d9876b5305..5ede24fb44ae 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -24,6 +24,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include "framebuffer.h"
+#include "gem.h"
#include "gtt.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
@@ -285,6 +286,7 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
/* Begin by trying to use stolen memory backing */
backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1, PAGE_SIZE);
if (backing) {
+ backing->gem.funcs = &psb_gem_object_funcs;
drm_gem_private_object_init(dev, &backing->gem, aligned_size);
return backing;
}
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index f9c4b1d76f56..8f07de83b6fb 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -18,7 +18,9 @@
#include "psb_drv.h"
-void psb_gem_free_object(struct drm_gem_object *obj)
+static vm_fault_t psb_gem_fault(struct vm_fault *vmf);
+
+static void psb_gem_free_object(struct drm_gem_object *obj)
{
struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
@@ -36,6 +38,17 @@ int psb_gem_get_aperture(struct drm_device *dev, void *data,
return -EINVAL;
}
+static const struct vm_operations_struct psb_gem_vm_ops = {
+ .fault = psb_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+const struct drm_gem_object_funcs psb_gem_object_funcs = {
+ .free = psb_gem_free_object,
+ .vm_ops = &psb_gem_vm_ops,
+};
+
/**
* psb_gem_create - create a mappable object
* @file: the DRM file of the client
@@ -63,6 +76,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
dev_err(dev->dev, "no memory for %lld byte GEM object\n", size);
return -ENOSPC;
}
+ r->gem.funcs = &psb_gem_object_funcs;
/* Initialize the extra goodies GEM needs to do all the hard work */
if (drm_gem_object_init(dev, &r->gem, size) != 0) {
psb_gtt_free_range(dev, r);
@@ -123,7 +137,7 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
*/
-vm_fault_t psb_gem_fault(struct vm_fault *vmf)
+static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj;
diff --git a/drivers/gpu/drm/gma500/gem.h b/drivers/gpu/drm/gma500/gem.h
index 4a74dc623b6b..3741a711b9fd 100644
--- a/drivers/gpu/drm/gma500/gem.h
+++ b/drivers/gpu/drm/gma500/gem.h
@@ -8,6 +8,9 @@
#ifndef _GEM_H
#define _GEM_H
+extern const struct drm_gem_object_funcs psb_gem_object_funcs;
+
extern int psb_gem_create(struct drm_file *file, struct drm_device *dev,
u64 size, u32 *handlep, int stolen, u32 align);
+
#endif
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 34b4aae9a15e..b13376a6fb91 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -480,12 +480,6 @@ static const struct dev_pm_ops psb_pm_ops = {
.runtime_idle = psb_runtime_idle,
};
-static const struct vm_operations_struct psb_gem_vm_ops = {
- .fault = psb_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct file_operations psb_gem_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -507,9 +501,6 @@ static struct drm_driver driver = {
.irq_uninstall = psb_irq_uninstall,
.irq_handler = psb_irq_handler,
- .gem_free_object_unlocked = psb_gem_free_object,
- .gem_vm_ops = &psb_gem_vm_ops,
-
.dumb_create = psb_gem_dumb_create,
.ioctls = psb_ioctls,
.fops = &psb_gem_fops,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 956926341316..c71a5a4e912c 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -735,12 +735,10 @@ extern const struct drm_connector_helper_funcs
extern const struct drm_connector_funcs psb_intel_lvds_connector_funcs;
/* gem.c */
-extern void psb_gem_free_object(struct drm_gem_object *obj);
extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file);
extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-extern vm_fault_t psb_gem_fault(struct vm_fault *vmf);
/* psb_device.c */
extern const struct psb_ops psb_chip_ops;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile
index f99132715597..684ef794eb7c 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Makefile
+++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o
+hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o hibmc_drm_i2c.o
obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 4d57ec688f82..a1eabadf5adb 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -23,15 +23,15 @@
#include "hibmc_drm_regs.h"
struct hibmc_display_panel_pll {
- unsigned long M;
- unsigned long N;
- unsigned long OD;
- unsigned long POD;
+ u64 M;
+ u64 N;
+ u64 OD;
+ u64 POD;
};
struct hibmc_dislay_pll_config {
- unsigned long hdisplay;
- unsigned long vdisplay;
+ u64 hdisplay;
+ u64 vdisplay;
u32 pll1_config_value;
u32 pll2_config_value;
};
@@ -52,8 +52,6 @@ static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
{1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ},
};
-#define PADDING(align, data) (((data) + (align) - 1) & (~((align) - 1)))
-
static int hibmc_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -104,8 +102,8 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *state = plane->state;
u32 reg;
s64 gpu_addr = 0;
- unsigned int line_l;
- struct hibmc_drm_private *priv = plane->dev->dev_private;
+ u32 line_l;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(plane->dev);
struct drm_gem_vram_object *gbo;
if (!state->fb)
@@ -157,10 +155,10 @@ static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = {
.atomic_update = hibmc_plane_atomic_update,
};
-static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
+static void hibmc_crtc_dpms(struct drm_crtc *crtc, u32 dpms)
{
- struct hibmc_drm_private *priv = crtc->dev->dev_private;
- unsigned int reg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
+ u32 reg;
reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
reg &= ~HIBMC_CRT_DISP_CTL_DPMS_MASK;
@@ -172,10 +170,10 @@ static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
}
static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
- unsigned int reg;
- struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ u32 reg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
@@ -191,10 +189,10 @@ static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
- unsigned int reg;
- struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ u32 reg;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_OFF);
drm_crtc_vblank_off(crtc);
@@ -214,7 +212,7 @@ static enum drm_mode_status
hibmc_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
- int i = 0;
+ size_t i = 0;
int vrefresh = drm_mode_vrefresh(mode);
if (vrefresh < 59 || vrefresh > 61)
@@ -229,9 +227,9 @@ hibmc_crtc_mode_valid(struct drm_crtc *crtc,
return MODE_BAD;
}
-static unsigned int format_pll_reg(void)
+static u32 format_pll_reg(void)
{
- unsigned int pllreg = 0;
+ u32 pllreg = 0;
struct hibmc_display_panel_pll pll = {0};
/*
@@ -251,10 +249,10 @@ static unsigned int format_pll_reg(void)
return pllreg;
}
-static void set_vclock_hisilicon(struct drm_device *dev, unsigned long pll)
+static void set_vclock_hisilicon(struct drm_device *dev, u64 pll)
{
u32 val;
- struct hibmc_drm_private *priv = dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
val = readl(priv->mmio + CRT_PLL1_HS);
val &= ~(CRT_PLL1_HS_OUTER_BYPASS(1));
@@ -281,11 +279,10 @@ static void set_vclock_hisilicon(struct drm_device *dev, unsigned long pll)
writel(val, priv->mmio + CRT_PLL1_HS);
}
-static void get_pll_config(unsigned long x, unsigned long y,
- u32 *pll1, u32 *pll2)
+static void get_pll_config(u64 x, u64 y, u32 *pll1, u32 *pll2)
{
- int i;
- int count = ARRAY_SIZE(hibmc_pll_table);
+ size_t i;
+ size_t count = ARRAY_SIZE(hibmc_pll_table);
for (i = 0; i < count; i++) {
if (hibmc_pll_table[i].hdisplay == x &&
@@ -308,14 +305,14 @@ static void get_pll_config(unsigned long x, unsigned long y,
* FPGA only supports 7 predefined pixel clocks, and clock select is
* in bit 4:0 of new register 0x802a8.
*/
-static unsigned int display_ctrl_adjust(struct drm_device *dev,
- struct drm_display_mode *mode,
- unsigned int ctrl)
+static u32 display_ctrl_adjust(struct drm_device *dev,
+ struct drm_display_mode *mode,
+ u32 ctrl)
{
- unsigned long x, y;
+ u64 x, y;
u32 pll1; /* bit[31:0] of PLL */
u32 pll2; /* bit[63:32] of PLL */
- struct hibmc_drm_private *priv = dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
x = mode->hdisplay;
y = mode->vdisplay;
@@ -360,12 +357,12 @@ static unsigned int display_ctrl_adjust(struct drm_device *dev,
static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
- unsigned int val;
+ u32 val;
struct drm_display_mode *mode = &crtc->state->mode;
struct drm_device *dev = crtc->dev;
- struct hibmc_drm_private *priv = dev->dev_private;
- int width = mode->hsync_end - mode->hsync_start;
- int height = mode->vsync_end - mode->vsync_start;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
+ u32 width = mode->hsync_end - mode->hsync_start;
+ u32 height = mode->vsync_end - mode->vsync_start;
writel(format_pll_reg(), priv->mmio + HIBMC_CRT_PLL_CTRL);
writel(HIBMC_FIELD(HIBMC_CRT_HORZ_TOTAL_TOTAL, mode->htotal - 1) |
@@ -395,9 +392,9 @@ static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc)
static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- unsigned int reg;
+ u32 reg;
struct drm_device *dev = crtc->dev;
- struct hibmc_drm_private *priv = dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
@@ -427,7 +424,7 @@ static void hibmc_crtc_atomic_flush(struct drm_crtc *crtc,
static int hibmc_crtc_enable_vblank(struct drm_crtc *crtc)
{
- struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(1),
priv->mmio + HIBMC_RAW_INTERRUPT_EN);
@@ -437,7 +434,7 @@ static int hibmc_crtc_enable_vblank(struct drm_crtc *crtc)
static void hibmc_crtc_disable_vblank(struct drm_crtc *crtc)
{
- struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
writel(HIBMC_RAW_INTERRUPT_EN_VBLANK(0),
priv->mmio + HIBMC_RAW_INTERRUPT_EN);
@@ -445,18 +442,18 @@ static void hibmc_crtc_disable_vblank(struct drm_crtc *crtc)
static void hibmc_crtc_load_lut(struct drm_crtc *crtc)
{
- struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(crtc->dev);
void __iomem *mmio = priv->mmio;
u16 *r, *g, *b;
- unsigned int reg;
- int i;
+ u32 reg;
+ u32 i;
r = crtc->gamma_store;
g = r + crtc->gamma_size;
b = g + crtc->gamma_size;
for (i = 0; i < crtc->gamma_size; i++) {
- unsigned int offset = i << 2;
+ u32 offset = i << 2;
u8 red = *r++ >> 8;
u8 green = *g++ >> 8;
u8 blue = *b++ >> 8;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 085d1b2fa8c0..0c1b40d25ac4 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -29,8 +29,7 @@ DEFINE_DRM_GEM_FOPS(hibmc_fops);
static irqreturn_t hibmc_drm_interrupt(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
- struct hibmc_drm_private *priv =
- (struct hibmc_drm_private *)dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
u32 status;
status = readl(priv->mmio + HIBMC_RAW_INTERRUPT);
@@ -122,12 +121,11 @@ static void hibmc_kms_fini(struct hibmc_drm_private *priv)
/*
* It can operate in one of three modes: 0, 1 or Sleep.
*/
-void hibmc_set_power_mode(struct hibmc_drm_private *priv,
- unsigned int power_mode)
+void hibmc_set_power_mode(struct hibmc_drm_private *priv, u32 power_mode)
{
- unsigned int control_value = 0;
+ u32 control_value = 0;
void __iomem *mmio = priv->mmio;
- unsigned int input = 1;
+ u32 input = 1;
if (power_mode > HIBMC_PW_MODE_CTL_MODE_SLEEP)
return;
@@ -145,8 +143,8 @@ void hibmc_set_power_mode(struct hibmc_drm_private *priv,
void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate)
{
- unsigned int gate_reg;
- unsigned int mode;
+ u32 gate_reg;
+ u32 mode;
void __iomem *mmio = priv->mmio;
/* Get current power mode. */
@@ -171,7 +169,7 @@ void hibmc_set_current_gate(struct hibmc_drm_private *priv, unsigned int gate)
static void hibmc_hw_config(struct hibmc_drm_private *priv)
{
- unsigned int reg;
+ u32 reg;
/* On hardware reset, power mode 0 is default. */
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_MODE0);
@@ -244,7 +242,7 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv)
static int hibmc_unload(struct drm_device *dev)
{
- struct hibmc_drm_private *priv = dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
drm_atomic_helper_shutdown(dev);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 197485e2fe0b..f310a83d9c48 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -14,31 +14,51 @@
#ifndef HIBMC_DRM_DRV_H
#define HIBMC_DRM_DRV_H
+#include <linux/gpio/consumer.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/i2c.h>
+
+#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
-struct drm_device;
+struct hibmc_connector {
+ struct drm_connector base;
+
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data bit_data;
+};
struct hibmc_drm_private {
/* hw */
void __iomem *mmio;
void __iomem *fb_map;
- unsigned long fb_base;
- unsigned long fb_size;
+ resource_size_t fb_base;
+ resource_size_t fb_size;
/* drm */
struct drm_device *dev;
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
- struct drm_connector connector;
+ struct hibmc_connector connector;
bool mode_config_initialized;
};
+static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector)
+{
+ return container_of(connector, struct hibmc_connector, base);
+}
+
+static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev)
+{
+ return dev->dev_private;
+}
+
void hibmc_set_power_mode(struct hibmc_drm_private *priv,
- unsigned int power_mode);
+ u32 power_mode);
void hibmc_set_current_gate(struct hibmc_drm_private *priv,
- unsigned int gate);
+ u32 gate);
int hibmc_de_init(struct hibmc_drm_private *priv);
int hibmc_vdac_init(struct hibmc_drm_private *priv);
@@ -47,6 +67,7 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc);
void hibmc_mm_fini(struct hibmc_drm_private *hibmc);
int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
+int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector);
extern const struct drm_mode_config_funcs hibmc_mode_funcs;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
new file mode 100644
index 000000000000..86d712090d87
--- /dev/null
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Hisilicon Hibmc SoC drm driver
+ *
+ * Based on the bochs drm driver.
+ *
+ * Copyright (c) 2016 Huawei Limited.
+ *
+ * Author:
+ * Tian Tao <tiantao6@hisilicon.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
+
+#include "hibmc_drm_drv.h"
+
+#define GPIO_DATA 0x0802A0
+#define GPIO_DATA_DIRECTION 0x0802A4
+
+#define I2C_SCL_MASK BIT(0)
+#define I2C_SDA_MASK BIT(1)
+
+static void hibmc_set_i2c_signal(void *data, u32 mask, int value)
+{
+ struct hibmc_connector *hibmc_connector = data;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+ u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
+
+ if (value) {
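+		/* Release the line: make the pin an input and let the bus pull-up raise it. */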
+ tmp_dir &= ~mask;
+ writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
+ } else {
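+		/* Drive the line low: latch 0 in the data register, then switch the pin to output. */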
+ u32 tmp_data = readl(priv->mmio + GPIO_DATA);
+
+ tmp_data &= ~mask;
+ writel(tmp_data, priv->mmio + GPIO_DATA);
+
+ tmp_dir |= mask;
+ writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
+ }
+}
+
+static int hibmc_get_i2c_signal(void *data, u32 mask)
+{
+ struct hibmc_connector *hibmc_connector = data;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(hibmc_connector->base.dev);
+ u32 tmp_dir = readl(priv->mmio + GPIO_DATA_DIRECTION);
+
+ if ((tmp_dir & mask) != mask) {
+ tmp_dir &= ~mask;
+ writel(tmp_dir, priv->mmio + GPIO_DATA_DIRECTION);
+ }
+
+ return (readl(priv->mmio + GPIO_DATA) & mask) ? 1 : 0;
+}
+
+static void hibmc_ddc_setsda(void *data, int state)
+{
+ hibmc_set_i2c_signal(data, I2C_SDA_MASK, state);
+}
+
+static void hibmc_ddc_setscl(void *data, int state)
+{
+ hibmc_set_i2c_signal(data, I2C_SCL_MASK, state);
+}
+
+static int hibmc_ddc_getsda(void *data)
+{
+ return hibmc_get_i2c_signal(data, I2C_SDA_MASK);
+}
+
+static int hibmc_ddc_getscl(void *data)
+{
+ return hibmc_get_i2c_signal(data, I2C_SCL_MASK);
+}
+
+int hibmc_ddc_create(struct drm_device *drm_dev,
+ struct hibmc_connector *connector)
+{
+ connector->adapter.owner = THIS_MODULE;
+ connector->adapter.class = I2C_CLASS_DDC;
+ snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus");
+ connector->adapter.dev.parent = &drm_dev->pdev->dev;
+ i2c_set_adapdata(&connector->adapter, connector);
+ connector->adapter.algo_data = &connector->bit_data;
+
+ connector->bit_data.udelay = 20;
+ connector->bit_data.timeout = usecs_to_jiffies(2000);
+ connector->bit_data.data = connector;
+ connector->bit_data.setsda = hibmc_ddc_setsda;
+ connector->bit_data.setscl = hibmc_ddc_setscl;
+ connector->bit_data.getsda = hibmc_ddc_getsda;
+ connector->bit_data.getscl = hibmc_ddc_getscl;
+
+ return i2c_bit_add_bus(&connector->adapter);
+}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 376a05ddbc2f..74e26c27d878 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -21,21 +21,41 @@
static int hibmc_connector_get_modes(struct drm_connector *connector)
{
int count;
+ void *edid;
+ struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
+
+ edid = drm_get_edid(connector, &hibmc_connector->adapter);
+ if (edid) {
+ drm_connector_update_edid_property(connector, edid);
+ count = drm_add_edid_modes(connector, edid);
+ if (count)
+ goto out;
+ }
count = drm_add_modes_noedid(connector,
connector->dev->mode_config.max_width,
connector->dev->mode_config.max_height);
drm_set_preferred_mode(connector, 1024, 768);
+out:
+ kfree(edid);
return count;
}
static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
return MODE_OK;
}
+static void hibmc_connector_destroy(struct drm_connector *connector)
+{
+ struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector);
+
+ i2c_del_adapter(&hibmc_connector->adapter);
+ drm_connector_cleanup(connector);
+}
+
static const struct drm_connector_helper_funcs
hibmc_connector_helper_funcs = {
.get_modes = hibmc_connector_get_modes,
@@ -44,7 +64,7 @@ static const struct drm_connector_helper_funcs
static const struct drm_connector_funcs hibmc_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
+ .destroy = hibmc_connector_destroy,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -56,7 +76,7 @@ static void hibmc_encoder_mode_set(struct drm_encoder *encoder,
{
u32 reg;
struct drm_device *dev = encoder->dev;
- struct hibmc_drm_private *priv = dev->dev_private;
+ struct hibmc_drm_private *priv = to_hibmc_drm_private(dev);
reg = readl(priv->mmio + HIBMC_DISPLAY_CONTROL_HISILE);
reg |= HIBMC_DISPLAY_CONTROL_FPVDDEN(1);
@@ -77,10 +97,17 @@ static const struct drm_encoder_funcs hibmc_encoder_funcs = {
int hibmc_vdac_init(struct hibmc_drm_private *priv)
{
struct drm_device *dev = priv->dev;
+ struct hibmc_connector *hibmc_connector = &priv->connector;
struct drm_encoder *encoder = &priv->encoder;
- struct drm_connector *connector = &priv->connector;
+ struct drm_connector *connector = &hibmc_connector->base;
int ret;
+ ret = hibmc_ddc_create(dev, hibmc_connector);
+ if (ret) {
+ drm_err(dev, "failed to create ddc: %d\n", ret);
+ return ret;
+ }
+
encoder->possible_crtcs = 0x1;
ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs,
DRM_MODE_ENCODER_DAC, NULL);
@@ -91,12 +118,15 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv)
drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs);
- ret = drm_connector_init(dev, connector, &hibmc_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &hibmc_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &hibmc_connector->adapter);
if (ret) {
drm_err(dev, "failed to init connector: %d\n", ret);
return ret;
}
+
drm_connector_helper_add(connector, &hibmc_connector_helper_funcs);
drm_connector_attach_encoder(connector, encoder);
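
[Note: two details in the vdac conversion are easy to miss: drm_get_edid() hands the caller ownership of a kmalloc'ed EDID blob (hence the kfree() on every exit path, safe because kfree(NULL) is a no-op), and the custom .destroy must delete the I2C adapter before drm_connector_cleanup() tears the connector down. The probe flow, sketched as a hypothetical standalone helper:

#include <linux/slab.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>

static int my_ddc_get_modes(struct drm_connector *connector,
			    struct i2c_adapter *ddc)
{
	struct edid *edid = drm_get_edid(connector, ddc);
	int count = 0;

	drm_connector_update_edid_property(connector, edid); /* NULL clears it */
	if (edid)
		count = drm_add_edid_modes(connector, edid);
	kfree(edid);	/* blob is owned by the caller */

	return count;	/* 0: fall back to drm_add_modes_noedid() */
}
— end note]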
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index e1108c1735ad..cfe8ff596d55 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -436,7 +436,7 @@ static void ade_dump_regs(void __iomem *base) { }
#endif
static void ade_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
@@ -459,7 +459,7 @@ static void ade_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void ade_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
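
[Note: the kirin hunks are the mechanical half of the core change that hands the CRTC .atomic_enable/.atomic_disable helpers the whole commit's drm_atomic_state. Drivers that never touched the old per-CRTC state, as here, only update the prototype; drivers that did use it now fetch it from the commit, roughly:

#include <drm/drm_atomic.h>

static void my_crtc_atomic_disable(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	/* Both the outgoing and incoming CRTC states hang off the commit. */
	struct drm_crtc_state *old_state =
		drm_atomic_get_old_crtc_state(state, crtc);
	struct drm_crtc_state *new_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	/* ...program the hardware from old_state/new_state... */
}
— end note]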
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 8dd295dbe241..0dd477e56573 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -77,14 +77,21 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
i915_gem_object_unpin_pages(obj);
}
-static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ void *vaddr;
- return i915_gem_object_pin_map(obj, I915_MAP_WB);
+ vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
+
+ dma_buf_map_set_vaddr(map, vaddr);
+
+ return 0;
}
-static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
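
[Note: this is the exporter side of the new dma-buf vmap contract: the mapping comes back through struct dma_buf_map, which pairs the address with an is-this-I/O-memory flag, and failure is an errno rather than NULL or an ERR_PTR. A sketch of the shape, with hypothetical my_* helpers doing the actual mapping:

#include <linux/dma-buf.h>
#include <linux/dma-buf-map.h>

static int my_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
	void *vaddr = my_kernel_mapping(dma_buf->priv);	/* hypothetical */

	if (!vaddr)
		return -ENOMEM;

	if (my_buffer_is_iomem(dma_buf->priv))		/* hypothetical */
		dma_buf_map_set_vaddr_iomem(map, (void __iomem *)vaddr);
	else
		dma_buf_map_set_vaddr(map, vaddr);	/* plain system memory */

	return 0;
}

Importers call dma_buf_vmap(buf, &map) and, when they know the buffer lives in system memory, may read map.vaddr directly — exactly what the selftest conversions further down do. — end note]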
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index c8421fd9d2dc..3389ac972d16 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -39,9 +39,18 @@ static struct i915_global_object {
struct kmem_cache *slab_objects;
} global;
+static const struct drm_gem_object_funcs i915_gem_object_funcs;
+
struct drm_i915_gem_object *i915_gem_object_alloc(void)
{
- return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+ struct drm_i915_gem_object *obj;
+
+ obj = kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+ if (!obj)
+ return NULL;
+ obj->base.funcs = &i915_gem_object_funcs;
+
+ return obj;
}
void i915_gem_object_free(struct drm_i915_gem_object *obj)
@@ -101,7 +110,7 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
-void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
+static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem);
struct drm_i915_file_private *fpriv = file->driver_priv;
@@ -264,7 +273,7 @@ static void __i915_gem_free_work(struct work_struct *work)
i915_gem_flush_free_objects(i915);
}
-void i915_gem_free_object(struct drm_gem_object *gem_obj)
+static void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
@@ -403,6 +412,12 @@ int __init i915_global_objects_init(void)
return 0;
}
+static const struct drm_gem_object_funcs i915_gem_object_funcs = {
+ .free = i915_gem_free_object,
+ .close = i915_gem_close_object,
+ .export = i915_gem_prime_export,
+};
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index d46db8d8f38e..eaf3d4147be0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -38,9 +38,6 @@ void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);
-void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
-void i915_gem_free_object(struct drm_gem_object *obj);
-
void i915_gem_flush_free_objects(struct drm_i915_private *i915);
struct sg_table *
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index 0845ce1ae37c..b6d43880b0c1 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -82,6 +82,7 @@ static int igt_dmabuf_import(void *arg)
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
void *obj_map, *dma_map;
+ struct dma_buf_map map;
u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
int err, i;
@@ -110,7 +111,8 @@ static int igt_dmabuf_import(void *arg)
goto out_obj;
}
- dma_map = dma_buf_vmap(dmabuf);
+ err = dma_buf_vmap(dmabuf, &map);
+ dma_map = err ? NULL : map.vaddr;
if (!dma_map) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
@@ -150,7 +152,7 @@ static int igt_dmabuf_import(void *arg)
err = 0;
out_dma_map:
- dma_buf_vunmap(dmabuf, dma_map);
+ dma_buf_vunmap(dmabuf, &map);
out_obj:
i915_gem_object_put(obj);
out_dmabuf:
@@ -163,6 +165,7 @@ static int igt_dmabuf_import_ownership(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
+ struct dma_buf_map map;
void *ptr;
int err;
@@ -170,7 +173,8 @@ static int igt_dmabuf_import_ownership(void *arg)
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
- ptr = dma_buf_vmap(dmabuf);
+ err = dma_buf_vmap(dmabuf, &map);
+ ptr = err ? NULL : map.vaddr;
if (!ptr) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
@@ -178,7 +182,7 @@ static int igt_dmabuf_import_ownership(void *arg)
}
memset(ptr, 0xc5, PAGE_SIZE);
- dma_buf_vunmap(dmabuf, ptr);
+ dma_buf_vunmap(dmabuf, &map);
obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
if (IS_ERR(obj)) {
@@ -212,6 +216,7 @@ static int igt_dmabuf_export_vmap(void *arg)
struct drm_i915_private *i915 = arg;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
+ struct dma_buf_map map;
void *ptr;
int err;
@@ -228,7 +233,8 @@ static int igt_dmabuf_export_vmap(void *arg)
}
i915_gem_object_put(obj);
- ptr = dma_buf_vmap(dmabuf);
+ err = dma_buf_vmap(dmabuf, &map);
+ ptr = err ? NULL : map.vaddr;
if (!ptr) {
pr_err("dma_buf_vmap failed\n");
err = -ENOMEM;
@@ -244,7 +250,7 @@ static int igt_dmabuf_export_vmap(void *arg)
memset(ptr, 0xc5, dmabuf->size);
err = 0;
- dma_buf_vunmap(dmabuf, ptr);
+ dma_buf_vunmap(dmabuf, &map);
out:
dma_buf_put(dmabuf);
return err;
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index be30b27e2926..2855d11c7a51 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -61,18 +61,24 @@ static void mock_dmabuf_release(struct dma_buf *dma_buf)
kfree(mock);
}
-static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
+static int mock_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
+ void *vaddr;
- return vm_map_ram(mock->pages, mock->npages, 0);
+ vaddr = vm_map_ram(mock->pages, mock->npages, 0);
+ if (!vaddr)
+ return -ENOMEM;
+ dma_buf_map_set_vaddr(map, vaddr);
+
+ return 0;
}
-static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
- vm_unmap_ram(vaddr, mock->npages);
+ vm_unmap_ram(map->vaddr, mock->npages);
}
static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index acc32066cec3..45e719c79183 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1750,12 +1750,8 @@ static struct drm_driver driver = {
.lastclose = i915_driver_lastclose,
.postclose = i915_driver_postclose,
- .gem_close_object = i915_gem_close_object,
- .gem_free_object_unlocked = i915_gem_free_object,
-
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = i915_gem_prime_export,
.gem_prime_import = i915_gem_prime_import,
.dumb_create = i915_gem_dumb_create,
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index b6c42fd872ad..9220c9d1a4b7 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -85,9 +85,6 @@ static struct drm_driver mock_driver = {
.name = "mock",
.driver_features = DRIVER_GEM,
.release = mock_device_release,
-
- .gem_close_object = i915_gem_close_object,
- .gem_free_object_unlocked = i915_gem_free_object,
};
static void release_dev(struct device *dev)
diff --git a/drivers/gpu/drm/imx/dcss/dcss-crtc.c b/drivers/gpu/drm/imx/dcss/dcss-crtc.c
index 36abff0890b2..8f570eb5f471 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-crtc.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-crtc.c
@@ -3,6 +3,7 @@
* Copyright 2019 NXP.
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_vblank.h>
#include <linux/platform_device.h>
@@ -77,8 +78,10 @@ static void dcss_crtc_atomic_flush(struct drm_crtc *crtc,
}
static void dcss_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
@@ -111,8 +114,10 @@ static void dcss_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void dcss_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
base);
struct dcss_dev *dcss = dcss_crtc->base.dev->dev_private;
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index 135a62366ab8..b72e5cef7e40 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -28,19 +28,7 @@ static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
static struct drm_driver dcss_kms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = drm_gem_cma_dumb_create,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS,
.fops = &dcss_cma_fops,
.name = "imx-dcss",
.desc = "i.MX8MQ Display Subsystem",
diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c
index 961d671f171b..e13652e3a115 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-plane.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c
@@ -267,7 +267,6 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
struct dcss_plane *dcss_plane = to_dcss_plane(plane);
struct dcss_dev *dcss = plane->dev->dev_private;
struct drm_framebuffer *fb = state->fb;
- u32 pixel_format;
struct drm_crtc_state *crtc_state;
bool modifiers_present;
u32 src_w, src_h, dst_w, dst_h;
@@ -277,7 +276,6 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
if (!fb || !state->crtc || !state->visible)
return;
- pixel_format = state->fb->format->format;
crtc_state = state->crtc->state;
modifiers_present = !!(fb->flags & DRM_MODE_FB_MODIFIERS);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index d412fc265395..7ecc27c41a6a 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -47,7 +47,7 @@ static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc)
}
static void ipu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
@@ -79,8 +79,10 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
}
static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index a3d1617d7c67..2329754af116 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -10,13 +10,16 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
@@ -40,12 +43,21 @@ struct ingenic_dma_hwdesc {
u32 addr;
u32 id;
u32 cmd;
-} __packed;
+} __aligned(16);
+
+struct ingenic_dma_hwdescs {
+ struct ingenic_dma_hwdesc hwdesc_f0;
+ struct ingenic_dma_hwdesc hwdesc_f1;
+ struct ingenic_dma_hwdesc hwdesc_pal;
+ u16 palette[256] __aligned(16);
+};
struct jz_soc_info {
bool needs_dev_clk;
bool has_osd;
unsigned int max_width, max_height;
+ const u32 *formats_f0, *formats_f1;
+ unsigned int num_formats_f0, num_formats_f1;
};
struct ingenic_drm {
@@ -63,17 +75,26 @@ struct ingenic_drm {
struct clk *lcd_clk, *pix_clk;
const struct jz_soc_info *soc_info;
- struct ingenic_dma_hwdesc *dma_hwdesc_f0, *dma_hwdesc_f1;
- dma_addr_t dma_hwdesc_phys_f0, dma_hwdesc_phys_f1;
+ struct ingenic_dma_hwdescs *dma_hwdescs;
+ dma_addr_t dma_hwdescs_phys;
bool panel_is_sharp;
bool no_vblank;
-};
-static const u32 ingenic_drm_primary_formats[] = {
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
+ /*
+ * clk_mutex is used to synchronize the pixel clock rate update with
+ * the VBLANK. When the pixel clock's parent clock needs to be updated,
+ * clock_nb's notifier function will lock the mutex, then wait until the
+ * next VBLANK. At that point, the parent clock's rate can be updated,
+ * and the mutex is then unlocked. If an atomic commit happens in the
+ * meantime, it will lock on the mutex, effectively waiting until the
+ * clock update process finishes. Finally, the pixel clock's rate will
+ * be recomputed when the mutex has been released, in the pending atomic
+ * commit, or a future one.
+ */
+ struct mutex clk_mutex;
+ bool update_clk_rate;
+ struct notifier_block clock_nb;
};
static bool ingenic_drm_writeable_reg(struct device *dev, unsigned int reg)
@@ -111,8 +132,31 @@ static inline struct ingenic_drm *drm_crtc_get_priv(struct drm_crtc *crtc)
return container_of(crtc, struct ingenic_drm, crtc);
}
+static inline struct ingenic_drm *drm_nb_get_priv(struct notifier_block *nb)
+{
+ return container_of(nb, struct ingenic_drm, clock_nb);
+}
+
+static int ingenic_drm_update_pixclk(struct notifier_block *nb,
+ unsigned long action,
+ void *data)
+{
+ struct ingenic_drm *priv = drm_nb_get_priv(nb);
+
+ switch (action) {
+ case PRE_RATE_CHANGE:
+ mutex_lock(&priv->clk_mutex);
+ priv->update_clk_rate = true;
+ drm_crtc_wait_one_vblank(&priv->crtc);
+ return NOTIFY_OK;
+ default:
+ mutex_unlock(&priv->clk_mutex);
+ return NOTIFY_OK;
+ }
+}
+
static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
@@ -126,7 +170,7 @@ static void ingenic_drm_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void ingenic_drm_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
+ struct drm_atomic_state *state)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
unsigned int var;
@@ -200,6 +244,12 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
struct drm_plane_state *f1_state, *f0_state, *ipu_state = NULL;
+ if (state->gamma_lut &&
+ drm_color_lut_size(state->gamma_lut) != ARRAY_SIZE(priv->dma_hwdescs->palette)) {
+ dev_dbg(priv->dev, "Invalid palette size\n");
+ return -EINVAL;
+ }
+
if (drm_atomic_crtc_needs_modeset(state) && priv->soc_info->has_osd) {
f1_state = drm_atomic_get_plane_state(state->state, &priv->f1);
if (IS_ERR(f1_state))
@@ -276,8 +326,14 @@ static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc,
if (drm_atomic_crtc_needs_modeset(state)) {
ingenic_drm_crtc_update_timings(priv, &state->mode);
+ priv->update_clk_rate = true;
+ }
+ if (priv->update_clk_rate) {
+ mutex_lock(&priv->clk_mutex);
clk_set_rate(priv->pix_clk, state->adjusted_mode.clock * 1000);
+ priv->update_clk_rate = false;
+ mutex_unlock(&priv->clk_mutex);
}
if (event) {
@@ -398,24 +454,39 @@ void ingenic_drm_plane_config(struct device *dev,
case DRM_FORMAT_RGB565:
ctrl |= JZ_LCD_OSDCTRL_BPP_15_16;
break;
+ case DRM_FORMAT_RGB888:
+ ctrl |= JZ_LCD_OSDCTRL_BPP_24_COMP;
+ break;
case DRM_FORMAT_XRGB8888:
ctrl |= JZ_LCD_OSDCTRL_BPP_18_24;
break;
+ case DRM_FORMAT_XRGB2101010:
+ ctrl |= JZ_LCD_OSDCTRL_BPP_30;
+ break;
}
regmap_update_bits(priv->map, JZ_REG_LCD_OSDCTRL,
JZ_LCD_OSDCTRL_BPP_MASK, ctrl);
} else {
switch (fourcc) {
+ case DRM_FORMAT_C8:
+ ctrl |= JZ_LCD_CTRL_BPP_8;
+ break;
case DRM_FORMAT_XRGB1555:
ctrl |= JZ_LCD_CTRL_RGB555;
fallthrough;
case DRM_FORMAT_RGB565:
ctrl |= JZ_LCD_CTRL_BPP_15_16;
break;
+ case DRM_FORMAT_RGB888:
+ ctrl |= JZ_LCD_CTRL_BPP_24_COMP;
+ break;
case DRM_FORMAT_XRGB8888:
ctrl |= JZ_LCD_CTRL_BPP_18_24;
break;
+ case DRM_FORMAT_XRGB2101010:
+ ctrl |= JZ_LCD_CTRL_BPP_30;
+ break;
}
regmap_update_bits(priv->map, JZ_REG_LCD_CTRL,
@@ -440,32 +511,64 @@ void ingenic_drm_plane_config(struct device *dev,
}
}
+static void ingenic_drm_update_palette(struct ingenic_drm *priv,
+ const struct drm_color_lut *lut)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(priv->dma_hwdescs->palette); i++) {
+ u16 color = drm_color_lut_extract(lut[i].red, 5) << 11
+ | drm_color_lut_extract(lut[i].green, 6) << 5
+ | drm_color_lut_extract(lut[i].blue, 5);
+
+ priv->dma_hwdescs->palette[i] = color;
+ }
+}
+
static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *oldstate)
{
struct ingenic_drm *priv = drm_device_get_priv(plane->dev);
struct drm_plane_state *state = plane->state;
+ struct drm_crtc_state *crtc_state;
struct ingenic_dma_hwdesc *hwdesc;
- unsigned int width, height, cpp;
+ unsigned int width, height, cpp, offset;
dma_addr_t addr;
+ u32 fourcc;
if (state && state->fb) {
+ crtc_state = state->crtc->state;
+
addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
width = state->src_w >> 16;
height = state->src_h >> 16;
cpp = state->fb->format->cpp[0];
if (priv->soc_info->has_osd && plane->type == DRM_PLANE_TYPE_OVERLAY)
- hwdesc = priv->dma_hwdesc_f0;
+ hwdesc = &priv->dma_hwdescs->hwdesc_f0;
else
- hwdesc = priv->dma_hwdesc_f1;
+ hwdesc = &priv->dma_hwdescs->hwdesc_f1;
hwdesc->addr = addr;
hwdesc->cmd = JZ_LCD_CMD_EOF_IRQ | (width * height * cpp / 4);
- if (drm_atomic_crtc_needs_modeset(state->crtc->state))
- ingenic_drm_plane_config(priv->dev, plane,
- state->fb->format->format);
+ if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+ fourcc = state->fb->format->format;
+
+ ingenic_drm_plane_config(priv->dev, plane, fourcc);
+
+ if (fourcc == DRM_FORMAT_C8)
+ offset = offsetof(struct ingenic_dma_hwdescs, hwdesc_pal);
+ else
+ offset = offsetof(struct ingenic_dma_hwdescs, hwdesc_f0);
+
+ priv->dma_hwdescs->hwdesc_f0.next = priv->dma_hwdescs_phys + offset;
+
+ crtc_state->color_mgmt_changed = fourcc == DRM_FORMAT_C8;
+ }
+
+ if (crtc_state->color_mgmt_changed)
+ ingenic_drm_update_palette(priv, crtc_state->gamma_lut->data);
}
}
@@ -686,6 +789,11 @@ static void ingenic_drm_unbind_all(void *d)
component_unbind_all(priv->dev, &priv->drm);
}
+static void __maybe_unused ingenic_drm_release_rmem(void *d)
+{
+ of_reserved_mem_device_release(d);
+}
+
static int ingenic_drm_bind(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -699,6 +807,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
void __iomem *base;
long parent_rate;
unsigned int i, clone_mask = 0;
+ dma_addr_t dma_hwdesc_phys_f0, dma_hwdesc_phys_f1;
int ret, irq;
soc_info = of_device_get_match_data(dev);
@@ -707,6 +816,19 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
return -EINVAL;
}
+ if (IS_ENABLED(CONFIG_OF_RESERVED_MEM)) {
+ ret = of_reserved_mem_device_init(dev);
+
+ if (ret && ret != -ENODEV)
+ dev_warn(dev, "Failed to get reserved memory: %d\n", ret);
+
+ if (!ret) {
+ ret = devm_add_action_or_reset(dev, ingenic_drm_release_rmem, dev);
+ if (ret)
+ return ret;
+ }
+ }
+
priv = devm_drm_dev_alloc(dev, &ingenic_drm_driver_data,
struct ingenic_drm, drm);
if (IS_ERR(priv))
@@ -760,26 +882,34 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
return PTR_ERR(priv->pix_clk);
}
- priv->dma_hwdesc_f1 = dmam_alloc_coherent(dev, sizeof(*priv->dma_hwdesc_f1),
- &priv->dma_hwdesc_phys_f1,
- GFP_KERNEL);
- if (!priv->dma_hwdesc_f1)
+ priv->dma_hwdescs = dmam_alloc_coherent(dev,
+ sizeof(*priv->dma_hwdescs),
+ &priv->dma_hwdescs_phys,
+ GFP_KERNEL);
+ if (!priv->dma_hwdescs)
return -ENOMEM;
- priv->dma_hwdesc_f1->next = priv->dma_hwdesc_phys_f1;
- priv->dma_hwdesc_f1->id = 0xf1;
- if (priv->soc_info->has_osd) {
- priv->dma_hwdesc_f0 = dmam_alloc_coherent(dev,
- sizeof(*priv->dma_hwdesc_f0),
- &priv->dma_hwdesc_phys_f0,
- GFP_KERNEL);
- if (!priv->dma_hwdesc_f0)
- return -ENOMEM;
+ /* Configure DMA hwdesc for foreground0 plane */
+ dma_hwdesc_phys_f0 = priv->dma_hwdescs_phys
+ + offsetof(struct ingenic_dma_hwdescs, hwdesc_f0);
+ priv->dma_hwdescs->hwdesc_f0.next = dma_hwdesc_phys_f0;
+ priv->dma_hwdescs->hwdesc_f0.id = 0xf0;
- priv->dma_hwdesc_f0->next = priv->dma_hwdesc_phys_f0;
- priv->dma_hwdesc_f0->id = 0xf0;
- }
+ /* Configure DMA hwdesc for foreground1 plane */
+ dma_hwdesc_phys_f1 = priv->dma_hwdescs_phys
+ + offsetof(struct ingenic_dma_hwdescs, hwdesc_f1);
+ priv->dma_hwdescs->hwdesc_f1.next = dma_hwdesc_phys_f1;
+ priv->dma_hwdescs->hwdesc_f1.id = 0xf1;
+
+ /* Configure DMA hwdesc for palette */
+ priv->dma_hwdescs->hwdesc_pal.next = priv->dma_hwdescs_phys
+ + offsetof(struct ingenic_dma_hwdescs, hwdesc_f0);
+ priv->dma_hwdescs->hwdesc_pal.id = 0xc0;
+ priv->dma_hwdescs->hwdesc_pal.addr = priv->dma_hwdescs_phys
+ + offsetof(struct ingenic_dma_hwdescs, palette);
+ priv->dma_hwdescs->hwdesc_pal.cmd = JZ_LCD_CMD_ENABLE_PAL
+ | (sizeof(priv->dma_hwdescs->palette) / 4);
if (soc_info->has_osd)
priv->ipu_plane = drm_plane_from_index(drm, 0);
@@ -788,8 +918,8 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
ret = drm_universal_plane_init(drm, &priv->f1, 1,
&ingenic_drm_primary_plane_funcs,
- ingenic_drm_primary_formats,
- ARRAY_SIZE(ingenic_drm_primary_formats),
+ priv->soc_info->formats_f1,
+ priv->soc_info->num_formats_f1,
NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
dev_err(dev, "Failed to register plane: %i\n", ret);
@@ -805,14 +935,17 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
return ret;
}
+ drm_crtc_enable_color_mgmt(&priv->crtc, 0, false,
+ ARRAY_SIZE(priv->dma_hwdescs->palette));
+
if (soc_info->has_osd) {
drm_plane_helper_add(&priv->f0,
&ingenic_drm_plane_helper_funcs);
ret = drm_universal_plane_init(drm, &priv->f0, 1,
&ingenic_drm_primary_plane_funcs,
- ingenic_drm_primary_formats,
- ARRAY_SIZE(ingenic_drm_primary_formats),
+ priv->soc_info->formats_f0,
+ priv->soc_info->num_formats_f0,
NULL, DRM_PLANE_TYPE_OVERLAY,
NULL);
if (ret) {
@@ -927,23 +1060,35 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
}
/* Set address of our DMA descriptor chain */
- regmap_write(priv->map, JZ_REG_LCD_DA0, priv->dma_hwdesc_phys_f0);
- regmap_write(priv->map, JZ_REG_LCD_DA1, priv->dma_hwdesc_phys_f1);
+ regmap_write(priv->map, JZ_REG_LCD_DA0, dma_hwdesc_phys_f0);
+ regmap_write(priv->map, JZ_REG_LCD_DA1, dma_hwdesc_phys_f1);
/* Enable OSD if available */
if (soc_info->has_osd)
regmap_write(priv->map, JZ_REG_LCD_OSDC, JZ_LCD_OSDC_OSDEN);
+ mutex_init(&priv->clk_mutex);
+ priv->clock_nb.notifier_call = ingenic_drm_update_pixclk;
+
+ parent_clk = clk_get_parent(priv->pix_clk);
+ ret = clk_notifier_register(parent_clk, &priv->clock_nb);
+ if (ret) {
+ dev_err(dev, "Unable to register clock notifier\n");
+ goto err_devclk_disable;
+ }
+
ret = drm_dev_register(drm, 0);
if (ret) {
dev_err(dev, "Failed to register DRM driver\n");
- goto err_devclk_disable;
+ goto err_clk_notifier_unregister;
}
drm_fbdev_generic_setup(drm, 32);
return 0;
+err_clk_notifier_unregister:
+ clk_notifier_unregister(parent_clk, &priv->clock_nb);
err_devclk_disable:
if (priv->lcd_clk)
clk_disable_unprepare(priv->lcd_clk);
@@ -965,7 +1110,9 @@ static int compare_of(struct device *dev, void *data)
static void ingenic_drm_unbind(struct device *dev)
{
struct ingenic_drm *priv = dev_get_drvdata(dev);
+ struct clk *parent_clk = clk_get_parent(priv->pix_clk);
+ clk_notifier_unregister(parent_clk, &priv->clock_nb);
if (priv->lcd_clk)
clk_disable_unprepare(priv->lcd_clk);
clk_disable_unprepare(priv->pix_clk);
@@ -1011,11 +1158,50 @@ static int ingenic_drm_remove(struct platform_device *pdev)
return 0;
}
+static const u32 jz4740_formats[] = {
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+static const u32 jz4725b_formats_f1[] = {
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+static const u32 jz4725b_formats_f0[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+};
+
+static const u32 jz4770_formats_f1[] = {
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB2101010,
+};
+
+static const u32 jz4770_formats_f0[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB2101010,
+};
+
static const struct jz_soc_info jz4740_soc_info = {
.needs_dev_clk = true,
.has_osd = false,
.max_width = 800,
.max_height = 600,
+ .formats_f1 = jz4740_formats,
+ .num_formats_f1 = ARRAY_SIZE(jz4740_formats),
+ /* JZ4740 has only one plane */
};
static const struct jz_soc_info jz4725b_soc_info = {
@@ -1023,6 +1209,10 @@ static const struct jz_soc_info jz4725b_soc_info = {
.has_osd = true,
.max_width = 800,
.max_height = 600,
+ .formats_f1 = jz4725b_formats_f1,
+ .num_formats_f1 = ARRAY_SIZE(jz4725b_formats_f1),
+ .formats_f0 = jz4725b_formats_f0,
+ .num_formats_f0 = ARRAY_SIZE(jz4725b_formats_f0),
};
static const struct jz_soc_info jz4770_soc_info = {
@@ -1030,6 +1220,10 @@ static const struct jz_soc_info jz4770_soc_info = {
.has_osd = true,
.max_width = 1280,
.max_height = 720,
+ .formats_f1 = jz4770_formats_f1,
+ .num_formats_f1 = ARRAY_SIZE(jz4770_formats_f1),
+ .formats_f0 = jz4770_formats_f0,
+ .num_formats_f0 = ARRAY_SIZE(jz4770_formats_f0),
};
static const struct of_device_id ingenic_drm_of_match[] = {
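
[Note: the palette plumbing above deserves a picture. All descriptors plus the 256-entry RGB565 palette now live in one dmam_alloc_coherent() block, and the F0 channel's next pointer is what selects paletted mode; userspace loads the palette through the standard gamma LUT property, which atomic_check sizes against the palette via drm_color_lut_size(). A hypothetical helper restating the repointing logic:

#include <linux/stddef.h>
#include <linux/types.h>

/* Chains after this change:
 *   DA1: hwdesc_f1 -> hwdesc_f1 -> ...              (self-looped)
 *   DA0: hwdesc_f0 -> hwdesc_f0 -> ...              (normal formats)
 *   DA0: hwdesc_f0 -> hwdesc_pal -> hwdesc_f0 ...   (C8: palette re-sent each frame)
 */
static void my_chain_f0(struct ingenic_dma_hwdescs *d, dma_addr_t phys, bool c8)
{
	d->hwdesc_pal.next = phys
		+ offsetof(struct ingenic_dma_hwdescs, hwdesc_f0);

	if (c8)
		d->hwdesc_f0.next = phys
			+ offsetof(struct ingenic_dma_hwdescs, hwdesc_pal);
	else
		d->hwdesc_f0.next = phys
			+ offsetof(struct ingenic_dma_hwdescs, hwdesc_f0);
}
— end note]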
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.h b/drivers/gpu/drm/ingenic/ingenic-drm.h
index 43f7d959cff7..9b48ce02803d 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.h
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.h
@@ -124,6 +124,8 @@
#define JZ_LCD_CTRL_BPP_8 0x3
#define JZ_LCD_CTRL_BPP_15_16 0x4
#define JZ_LCD_CTRL_BPP_18_24 0x5
+#define JZ_LCD_CTRL_BPP_24_COMP 0x6
+#define JZ_LCD_CTRL_BPP_30 0x7
#define JZ_LCD_CTRL_BPP_MASK (JZ_LCD_CTRL_RGB555 | 0x7)
#define JZ_LCD_CMD_SOF_IRQ BIT(31)
@@ -145,6 +147,7 @@
#define JZ_LCD_OSDCTRL_CHANGE BIT(3)
#define JZ_LCD_OSDCTRL_BPP_15_16 0x4
#define JZ_LCD_OSDCTRL_BPP_18_24 0x5
+#define JZ_LCD_OSDCTRL_BPP_24_COMP 0x6
#define JZ_LCD_OSDCTRL_BPP_30 0x7
#define JZ_LCD_OSDCTRL_BPP_MASK (JZ_LCD_OSDCTRL_RGB555 | 0x7)
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index c592957ed07f..f9b5f450a9cb 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -331,8 +331,8 @@ static int mcde_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- ret = -EINVAL;
+ if (irq < 0) {
+ ret = irq;
goto clk_disable;
}
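
[Note: the mcde fix corrects an inverted error check: platform_get_irq() returns the IRQ number on success and a negative errno on failure, and never uses 0 to mean "no interrupt", so testing !irq let negative error codes (including -EPROBE_DEFER) sail through and later be used as an IRQ number. The canonical call site:

#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER, -ENXIO, ... */

	/* ...devm_request_irq(&pdev->dev, irq, ...)... */
	return 0;
}
— end note]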
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index ac038572164d..c28f5d7aac1a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -517,7 +517,7 @@ void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
}
static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
@@ -542,7 +542,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 59c85c63b7cc..7f3398a7c2b0 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -324,18 +324,13 @@ struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
static struct drm_driver mtk_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .gem_free_object_unlocked = mtk_drm_gem_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = mtk_drm_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = mtk_drm_gem_prime_import,
- .gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
.gem_prime_mmap = mtk_drm_gem_mmap_buf,
- .gem_prime_vmap = mtk_drm_gem_prime_vmap,
- .gem_prime_vunmap = mtk_drm_gem_prime_vunmap,
.fops = &mtk_drm_fops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index 0583e557ad37..cdd1a6e61564 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -8,11 +8,20 @@
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_prime.h>
#include "mtk_drm_drv.h"
#include "mtk_drm_gem.h"
+static const struct drm_gem_object_funcs mtk_drm_gem_object_funcs = {
+ .free = mtk_drm_gem_free_object,
+ .get_sg_table = mtk_gem_prime_get_sg_table,
+ .vmap = mtk_drm_gem_prime_vmap,
+ .vunmap = mtk_drm_gem_prime_vunmap,
+ .vm_ops = &drm_gem_cma_vm_ops,
+};
+
static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
@@ -25,6 +34,8 @@ static struct mtk_drm_gem_obj *mtk_drm_gem_init(struct drm_device *dev,
if (!mtk_gem_obj)
return ERR_PTR(-ENOMEM);
+ mtk_gem_obj->base.funcs = &mtk_drm_gem_object_funcs;
+
ret = drm_gem_object_init(dev, &mtk_gem_obj->base, size);
if (ret < 0) {
DRM_ERROR("failed to initialize gem object\n");
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 2854272dc2d9..247ce085886b 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -82,7 +82,7 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
};
static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
@@ -118,7 +118,7 @@ static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct drm_crtc_state *crtc_state = crtc->state;
@@ -146,7 +146,7 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
@@ -171,7 +171,7 @@ static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
struct meson_drm *priv = meson_crtc->priv;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index f56414a06ec4..6a24ce245a37 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -11,6 +11,7 @@
#include <linux/ktime.h>
#include <linux/bits.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_mode.h>
@@ -706,8 +707,10 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
}
static void dpu_crtc_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
struct drm_encoder *encoder;
@@ -770,7 +773,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
}
static void dpu_crtc_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
struct drm_encoder *encoder;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index a0253297bc76..6b03ceeb5ba1 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -264,7 +264,7 @@ static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
@@ -284,7 +284,7 @@ static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct mdp4_kms *mdp4_kms = get_kms(crtc);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index c39dad151bb6..747dd8a7aa6e 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -483,7 +483,7 @@ static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
}
static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
@@ -529,7 +529,7 @@ static void mdp5_crtc_vblank_on(struct drm_crtc *crtc)
}
static void mdp5_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 49685571dc0e..aa4509766d64 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -972,12 +972,6 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
};
-static const struct vm_operations_struct vm_ops = {
- .fault = msm_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -1003,18 +997,11 @@ static struct drm_driver msm_driver = {
.irq_preinstall = msm_irq_preinstall,
.irq_postinstall = msm_irq_postinstall,
.irq_uninstall = msm_irq_uninstall,
- .gem_free_object_unlocked = msm_gem_free_object,
- .gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_pin = msm_gem_prime_pin,
- .gem_prime_unpin = msm_gem_prime_unpin,
- .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
.gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
- .gem_prime_vmap = msm_gem_prime_vmap,
- .gem_prime_vunmap = msm_gem_prime_vunmap,
.gem_prime_mmap = msm_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = msm_debugfs_init,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b9dd8f8f4887..c45789f36e48 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -276,7 +276,6 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 04be4cfcccc1..2e1bce7c0b19 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -236,7 +236,7 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
-vm_fault_t msm_gem_fault(struct vm_fault *vmf)
+static vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
@@ -1000,6 +1000,22 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
return ret;
}
+static const struct vm_operations_struct vm_ops = {
+ .fault = msm_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs msm_gem_object_funcs = {
+ .free = msm_gem_free_object,
+ .pin = msm_gem_prime_pin,
+ .unpin = msm_gem_prime_unpin,
+ .get_sg_table = msm_gem_prime_get_sg_table,
+ .vmap = msm_gem_prime_vmap,
+ .vunmap = msm_gem_prime_vunmap,
+ .vm_ops = &vm_ops,
+};
+
static int msm_gem_new_impl(struct drm_device *dev,
uint32_t size, uint32_t flags,
struct drm_gem_object **obj)
@@ -1030,6 +1046,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
INIT_LIST_HEAD(&msm_obj->vmas);
*obj = &msm_obj->base;
+ (*obj)->funcs = &msm_gem_object_funcs;
return 0;
}
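
[Note: msm's conversion has one extra moving part, the mmap fault path. The core now takes vm_ops from the object's funcs, so both the vm_operations_struct and the fault handler become private to msm_gem.c, and assigning the funcs in the common msm_gem_new_impl() path covers imported buffers too. The fault wiring in sketch form, with a hypothetical my_insert_pages():

#include <drm/drm_gem.h>

static vm_fault_t my_gem_fault(struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vmf->vma->vm_private_data;

	return my_insert_pages(obj, vmf);	/* hypothetical: map pages or SIGBUS */
}

static const struct vm_operations_struct my_gem_vm_ops = {
	.fault = my_gem_fault,
	.open  = drm_gem_vm_open,	/* take an object ref with the VMA */
	.close = drm_gem_vm_close,	/* and drop it again */
};

static const struct drm_gem_object_funcs my_gem_funcs = {
	/* ...free/pin/vmap as in the earlier sketch... */
	.vm_ops = &my_gem_vm_ops,	/* picked up by the GEM mmap helpers */
};
— end note]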
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index b721b8b262ce..956f631997f2 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -302,7 +302,7 @@ static void mxsfb_crtc_atomic_flush(struct drm_crtc *crtc,
}
static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev);
struct drm_device *drm = mxsfb->drm;
@@ -326,7 +326,7 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void mxsfb_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(crtc->dev);
struct drm_device *drm = mxsfb->drm;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2ee75646ad6f..75fddbcd7832 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -46,6 +46,7 @@
static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
struct ttm_resource *reg);
+static void nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
/*
* NV10-NV40 tiling helpers
@@ -139,7 +140,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- WARN_ON(nvbo->pin_refcnt > 0);
+ WARN_ON(nvbo->bo.pin_count > 0);
nouveau_bo_del_io_reserve_lru(bo);
nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
@@ -343,37 +344,23 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
}
static void
-set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n,
- uint32_t domain, uint32_t flags)
+set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain)
{
*n = 0;
if (domain & NOUVEAU_GEM_DOMAIN_VRAM) {
- struct nvif_mmu *mmu = &drm->client.mmu;
- const u8 type = mmu->type[drm->ttm.type_vram].type;
-
pl[*n].mem_type = TTM_PL_VRAM;
- pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED;
-
- /* Some BARs do not support being ioremapped WC */
- if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
- type & NVIF_MEM_UNCACHED)
- pl[*n].flags &= ~TTM_PL_FLAG_WC;
-
+ pl[*n].flags = 0;
(*n)++;
}
if (domain & NOUVEAU_GEM_DOMAIN_GART) {
pl[*n].mem_type = TTM_PL_TT;
- pl[*n].flags = flags;
-
- if (drm->agp.bridge)
- pl[*n].flags &= ~TTM_PL_FLAG_CACHED;
-
+ pl[*n].flags = 0;
(*n)++;
}
if (domain & NOUVEAU_GEM_DOMAIN_CPU) {
pl[*n].mem_type = TTM_PL_SYSTEM;
- pl[(*n)++].flags = flags;
+ pl[(*n)++].flags = 0;
}
}
@@ -415,19 +402,14 @@ void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain,
uint32_t busy)
{
- struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_placement *pl = &nvbo->placement;
- uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
- TTM_PL_MASK_CACHING) |
- (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
pl->placement = nvbo->placements;
- set_placement_list(drm, nvbo->placements, &pl->num_placement,
- domain, flags);
+ set_placement_list(nvbo->placements, &pl->num_placement, domain);
pl->busy_placement = nvbo->busy_placements;
- set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement,
- domain | busy, flags);
+ set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
+ domain | busy);
set_placement_range(nvbo, domain);
}
@@ -453,7 +435,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
}
}
- if (nvbo->pin_refcnt) {
+ if (nvbo->bo.pin_count) {
bool error = evict;
switch (bo->mem.mem_type) {
@@ -472,7 +454,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
bo->mem.mem_type, domain);
ret = -EBUSY;
}
- nvbo->pin_refcnt++;
+ ttm_bo_pin(&nvbo->bo);
goto out;
}
@@ -483,18 +465,12 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
goto out;
}
- nvbo->pin_refcnt++;
nouveau_bo_placement_set(nvbo, domain, 0);
-
- /* drop pin_refcnt temporarily, so we don't trip the assertion
- * in nouveau_bo_move() that makes sure we're not trying to
- * move a pinned buffer
- */
- nvbo->pin_refcnt--;
ret = nouveau_bo_validate(nvbo, false, false);
if (ret)
goto out;
- nvbo->pin_refcnt++;
+
+ ttm_bo_pin(&nvbo->bo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
@@ -519,30 +495,14 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
- int ret, ref;
+ int ret;
ret = ttm_bo_reserve(bo, false, false, NULL);
if (ret)
return ret;
- ref = --nvbo->pin_refcnt;
- WARN_ON_ONCE(ref < 0);
- if (ref)
- goto out;
-
- switch (bo->mem.mem_type) {
- case TTM_PL_VRAM:
- nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
- break;
- case TTM_PL_TT:
- nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
- break;
- default:
- break;
- }
-
- ret = nouveau_bo_validate(nvbo, false, false);
- if (ret == 0) {
+ ttm_bo_unpin(&nvbo->bo);
+ if (!nvbo->bo.pin_count) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
drm->gem.vram_available += bo->mem.size;
@@ -555,9 +515,8 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
}
}
-out:
ttm_bo_unreserve(bo);
- return ret;
+ return 0;
}
int
@@ -588,7 +547,7 @@ void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+ struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i;
if (!ttm_dma)
@@ -598,7 +557,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+ for (i = 0; i < ttm_dma->num_pages; i++)
dma_sync_single_for_device(drm->dev->dev,
ttm_dma->dma_address[i],
PAGE_SIZE, DMA_TO_DEVICE);
@@ -608,7 +567,7 @@ void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+ struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
int i;
if (!ttm_dma)
@@ -618,7 +577,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
if (nvbo->force_coherent)
return;
- for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+ for (i = 0; i < ttm_dma->num_pages; i++)
dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
PAGE_SIZE, DMA_FROM_DEVICE);
}
@@ -796,8 +755,9 @@ done:
}
static int
-nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- bool no_wait_gpu, struct ttm_resource *new_reg)
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_reg)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
@@ -816,7 +776,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
}
mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
- ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
+ ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
if (ret == 0) {
ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
if (ret == 0) {
@@ -903,15 +863,15 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
}
static int
-nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_gpu, struct ttm_resource *new_reg)
+nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_reg)
{
- struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
struct ttm_place placement_memtype = {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_TT,
- .flags = TTM_PL_MASK_CACHING
+ .flags = 0
};
struct ttm_placement placement;
struct ttm_resource tmp_reg;
@@ -922,11 +882,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_reg = *new_reg;
tmp_reg.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, ctx);
if (ret)
return ret;
- ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+ ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
if (ret)
goto out;
@@ -934,26 +894,32 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
+ ret = nouveau_bo_move_m2mf(bo, true, ctx, &tmp_reg);
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, &ctx, new_reg);
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (ret)
+ goto out;
+
+ nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
+ ttm_resource_free(bo, &bo->mem);
+ ttm_bo_assign_mem(bo, &tmp_reg);
out:
ttm_resource_free(bo, &tmp_reg);
return ret;
}
static int
-nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_gpu, struct ttm_resource *new_reg)
+nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_reg)
{
- struct ttm_operation_ctx ctx = { intr, no_wait_gpu };
struct ttm_place placement_memtype = {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_TT,
- .flags = TTM_PL_MASK_CACHING
+ .flags = 0
};
struct ttm_placement placement;
struct ttm_resource tmp_reg;
@@ -964,15 +930,20 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_reg = *new_reg;
tmp_reg.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, &ctx);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, ctx);
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, &ctx, &tmp_reg);
- if (ret)
- goto out;
+ ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+ if (unlikely(ret != 0))
+ return ret;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
+ ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ttm_bo_assign_mem(bo, &tmp_reg);
+ ret = nouveau_bo_move_m2mf(bo, true, ctx, new_reg);
if (ret)
goto out;
@@ -1061,17 +1032,24 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+ if (new_reg->mem_type == TTM_PL_TT) {
+ ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
+ if (ret)
+ return ret;
+ }
+
+ nouveau_bo_move_ntfy(bo, evict, new_reg);
+ ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
- return ret;
+ goto out_ntfy;
- if (nvbo->pin_refcnt)
+ if (nvbo->bo.pin_count)
NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
if (ret)
- return ret;
+ goto out_ntfy;
}
/* Fake bo copy. */
@@ -1080,28 +1058,37 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
goto out;
}
+ if (old_reg->mem_type == TTM_PL_SYSTEM &&
+ new_reg->mem_type == TTM_PL_TT) {
+ ttm_bo_move_null(bo, new_reg);
+ goto out;
+ }
+
+ if (old_reg->mem_type == TTM_PL_TT &&
+ new_reg->mem_type == TTM_PL_SYSTEM) {
+ nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
+ ttm_resource_free(bo, &bo->mem);
+ ttm_bo_assign_mem(bo, new_reg);
+ goto out;
+ }
+
/* Hardware assisted copy. */
if (drm->ttm.move) {
if (new_reg->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict,
- ctx->interruptible,
- ctx->no_wait_gpu, new_reg);
+ ret = nouveau_bo_move_flipd(bo, evict, ctx,
+ new_reg);
else if (old_reg->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict,
- ctx->interruptible,
- ctx->no_wait_gpu, new_reg);
+ ret = nouveau_bo_move_flips(bo, evict, ctx,
+ new_reg);
else
- ret = nouveau_bo_move_m2mf(bo, evict,
- ctx->interruptible,
- ctx->no_wait_gpu, new_reg);
+ ret = nouveau_bo_move_m2mf(bo, evict, ctx,
+ new_reg);
if (!ret)
goto out;
}
/* Fallback to software copy. */
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
- if (ret == 0)
- ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
+ ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
out:
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
@@ -1110,7 +1097,12 @@ out:
else
nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
}
-
+out_ntfy:
+ if (ret) {
+ swap(*new_reg, bo->mem);
+ nouveau_bo_move_ntfy(bo, false, new_reg);
+ swap(*new_reg, bo->mem);
+ }
return ret;
}
@@ -1150,6 +1142,8 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nouveau_mem *mem = nouveau_mem(reg);
+ struct nvif_mmu *mmu = &drm->client.mmu;
+ const u8 type = mmu->type[drm->ttm.type_vram].type;
int ret;
mutex_lock(&drm->ttm.io_reserve_mutex);
@@ -1165,6 +1159,7 @@ retry:
reg->bus.offset = (reg->start << PAGE_SHIFT) +
drm->agp.base;
reg->bus.is_iomem = !drm->agp.cma;
+ reg->bus.caching = ttm_write_combined;
}
#endif
if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
@@ -1178,6 +1173,14 @@ retry:
reg->bus.offset = (reg->start << PAGE_SHIFT) +
device->func->resource_addr(device, 1);
reg->bus.is_iomem = true;
+
+ /* Some BARs do not support being ioremapped WC */
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+ type & NVIF_MEM_UNCACHED)
+ reg->bus.caching = ttm_uncached;
+ else
+ reg->bus.caching = ttm_write_combined;
+
if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
union {
struct nv50_mem_map_v0 nv50;
@@ -1252,8 +1255,7 @@ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
mutex_unlock(&drm->ttm.io_reserve_mutex);
}
-static int
-nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1269,41 +1271,45 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
!nvbo->kind)
return 0;
- if (bo->mem.mem_type == TTM_PL_SYSTEM) {
- nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
- 0);
+ if (bo->mem.mem_type != TTM_PL_SYSTEM)
+ return 0;
+
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
- ret = nouveau_bo_validate(nvbo, false, false);
- if (ret)
- return ret;
+ } else {
+ /* make sure bo is in mappable vram */
+ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
+ bo->mem.start + bo->mem.num_pages < mappable)
+ return 0;
+
+ for (i = 0; i < nvbo->placement.num_placement; ++i) {
+ nvbo->placements[i].fpfn = 0;
+ nvbo->placements[i].lpfn = mappable;
}
- return 0;
- }
- /* make sure bo is in mappable vram */
- if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
- bo->mem.start + bo->mem.num_pages < mappable)
- return 0;
+ for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
+ nvbo->busy_placements[i].fpfn = 0;
+ nvbo->busy_placements[i].lpfn = mappable;
+ }
- for (i = 0; i < nvbo->placement.num_placement; ++i) {
- nvbo->placements[i].fpfn = 0;
- nvbo->placements[i].lpfn = mappable;
+ nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
}
- for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
- nvbo->busy_placements[i].fpfn = 0;
- nvbo->busy_placements[i].lpfn = mappable;
- }
+ ret = nouveau_bo_validate(nvbo, false, false);
+ if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
+ return VM_FAULT_NOPAGE;
+ else if (unlikely(ret))
+ return VM_FAULT_SIGBUS;
- nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
- return nouveau_bo_validate(nvbo, false, false);
+ ttm_bo_move_to_lru_tail_unlocked(bo);
+ return 0;
}
static int
nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
- struct ttm_dma_tt *ttm_dma = (void *)ttm;
+ struct ttm_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
struct device *dev;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1315,7 +1321,6 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
/* make userspace faulting work */
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
ttm_dma->dma_address, ttm->num_pages);
- ttm_tt_set_populated(ttm);
return 0;
}
@@ -1340,7 +1345,7 @@ static void
nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
struct ttm_tt *ttm)
{
- struct ttm_dma_tt *ttm_dma = (void *)ttm;
+ struct ttm_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
struct device *dev;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1395,19 +1400,22 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool excl
dma_resv_add_shared_fence(resv, &fence->base);
}
+static void
+nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+ nouveau_bo_move_ntfy(bo, false, NULL);
+}
+
struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
.ttm_tt_populate = &nouveau_ttm_tt_populate,
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
- .ttm_tt_bind = &nouveau_ttm_tt_bind,
- .ttm_tt_unbind = &nouveau_ttm_tt_unbind,
.ttm_tt_destroy = &nouveau_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = nouveau_bo_evict_flags,
- .move_notify = nouveau_bo_move_ntfy,
+ .delete_mem_notify = nouveau_bo_delete_mem_notify,
.move = nouveau_bo_move,
.verify_access = nouveau_bo_verify_access,
- .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
};
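The fault_reserve_notify hunk above also changes the return convention: instead of a kernel errno, the helper now hands back a vm_fault_t, because it is called directly from the driver's vm fault handler (see the nouveau_ttm.c hunks further down) rather than through a TTM callback. A minimal sketch of the errno-to-vm_fault_t mapping it uses, assuming only the standard definitions from <linux/mm_types.h>:

/* Illustrative only -- not the nouveau code. Transient validation errors
 * become VM_FAULT_NOPAGE so the CPU retries the access; anything else is
 * fatal for the mapping and raises SIGBUS in the faulting task.
 */
static vm_fault_t errno_to_vm_fault(int err)
{
	if (err == -EBUSY || err == -ERESTARTSYS)
		return VM_FAULT_NOPAGE;
	if (err)
		return VM_FAULT_SIGBUS;
	return 0;
}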
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 2a23c8207436..641ef6298a0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -40,9 +40,6 @@ struct nouveau_bo {
struct nouveau_drm_tile *tile;
- /* protect by the ttm reservation lock */
- int pin_refcnt;
-
struct ttm_bo_kmap_obj dma_buf_vmap;
};
@@ -92,6 +89,7 @@ void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy);
void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
u32 nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
+vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo);
void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
bool no_wait_gpu);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 8f099601d2f2..5d191e58edf1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -107,7 +107,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nvif_object_dtor(&chan->push.ctxdma);
nouveau_vma_del(&chan->push.vma);
nouveau_bo_unmap(chan->push.buffer);
- if (chan->push.buffer && chan->push.buffer->pin_refcnt)
+ if (chan->push.buffer && chan->push.buffer->bo.pin_count)
nouveau_bo_unpin(chan->push.buffer);
nouveau_bo_ref(NULL, &chan->push.buffer);
kfree(chan);
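The pin_refcnt field dropped from struct nouveau_bo in the hunk above is replaced by the pin_count that TTM now maintains itself through ttm_bo_pin()/ttm_bo_unpin(), superseding the old TTM_PL_FLAG_NO_EVICT placement flag. A minimal sketch of the new pattern, assuming the 5.11 TTM signatures (both calls require the buffer's reservation lock to be held):

/* Sketch only: pin a buffer object so it cannot be evicted, using the
 * refcounted TTM API instead of a driver-private counter.
 */
static int example_pin_bo(struct ttm_buffer_object *bo)
{
	int ret = ttm_bo_reserve(bo, true, false, NULL);

	if (ret)
		return ret;
	ttm_bo_pin(bo);		/* increments bo->pin_count */
	ttm_bo_unreserve(bo);
	return 0;
}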
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 42fc5c813a9b..d141a5f004af 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -820,6 +820,7 @@ static int
nouveau_do_suspend(struct drm_device *dev, bool runtime)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ struct ttm_resource_manager *man;
int ret;
nouveau_svm_suspend(drm);
@@ -836,7 +837,9 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
}
NV_DEBUG(drm, "evicting buffers...\n");
- ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
+
+ man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
+ ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
NV_DEBUG(drm, "waiting for kernel channels to go idle...\n");
if (drm->cechan) {
@@ -1207,16 +1210,7 @@ driver_stub = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_pin = nouveau_gem_prime_pin,
- .gem_prime_unpin = nouveau_gem_prime_unpin,
- .gem_prime_get_sg_table = nouveau_gem_prime_get_sg_table,
.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
- .gem_prime_vmap = nouveau_gem_prime_vmap,
- .gem_prime_vunmap = nouveau_gem_prime_vunmap,
-
- .gem_free_object_unlocked = nouveau_gem_object_del,
- .gem_open_object = nouveau_gem_object_open,
- .gem_close_object = nouveau_gem_object_close,
.dumb_create = nouveau_display_dumb_create,
.dumb_map_offset = nouveau_display_dumb_map_offset,
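The callbacks removed from driver_stub here do not disappear: the series moves them into a per-object struct drm_gem_object_funcs, added in the nouveau_gem.c hunk that follows. Conceptually, the DRM core now dispatches through the object rather than the driver, which lets a driver mix GEM object types. A simplified, hedged sketch of that dispatch (names per the 5.11 DRM core):

/* Simplified illustration of the new dispatch path: each GEM object
 * carries its own function table.
 */
static void example_gem_object_free(struct drm_gem_object *obj)
{
	if (obj->funcs && obj->funcs->free)
		obj->funcs->free(obj);
}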
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 549bc67feabb..dd51cd0ae20c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -169,6 +169,17 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
ttm_bo_unreserve(&nvbo->bo);
}
+const struct drm_gem_object_funcs nouveau_gem_object_funcs = {
+ .free = nouveau_gem_object_del,
+ .open = nouveau_gem_object_open,
+ .close = nouveau_gem_object_close,
+ .pin = nouveau_gem_prime_pin,
+ .unpin = nouveau_gem_prime_unpin,
+ .get_sg_table = nouveau_gem_prime_get_sg_table,
+ .vmap = nouveau_gem_prime_vmap,
+ .vunmap = nouveau_gem_prime_vunmap,
+};
+
int
nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
uint32_t tile_mode, uint32_t tile_flags,
@@ -186,6 +197,8 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
if (IS_ERR(nvbo))
return PTR_ERR(nvbo);
+ nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
+
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
ret = drm_gem_object_init(drm->dev, &nvbo->bo.base, size);
@@ -210,7 +223,6 @@ nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
nvbo->valid_domains &= domain;
- nvbo->bo.persistent_swap_storage = nvbo->bo.base.filp;
*pnvbo = nvbo;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
index 978e07591990..b35c180322e2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -5,6 +5,8 @@
#include "nouveau_drv.h"
#include "nouveau_bo.h"
+extern const struct drm_gem_object_funcs nouveau_gem_object_funcs;
+
static inline struct nouveau_bo *
nouveau_gem_object(struct drm_gem_object *gem)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 9dfcce1b9846..a1049e9feee1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -92,7 +92,7 @@ nouveau_mem_fini(struct nouveau_mem *mem)
}
int
-nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt)
+nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
{
struct nouveau_mem *mem = nouveau_mem(reg);
struct nouveau_cli *cli = mem->cli;
@@ -116,8 +116,10 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt)
mem->comp = 0;
}
- if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl;
- else args.dma = tt->dma_address;
+ if (tt->sg)
+ args.sgl = tt->sg->sgl;
+ else
+ args.dma = tt->dma_address;
mutex_lock(&drm->master.lock);
cli->base.super = true;
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h
index 3fe1cfed57a1..7df3848e85aa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -1,7 +1,7 @@
#ifndef __NOUVEAU_MEM_H__
#define __NOUVEAU_MEM_H__
#include <drm/ttm/ttm_bo_api.h>
-struct ttm_dma_tt;
+struct ttm_tt;
#include <nvif/mem.h>
#include <nvif/vmm.h>
@@ -24,7 +24,7 @@ int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
struct ttm_resource *);
void nouveau_mem_del(struct ttm_resource *);
int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
-int nouveau_mem_host(struct ttm_resource *, struct ttm_dma_tt *);
+int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
void nouveau_mem_fini(struct nouveau_mem *);
int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index b2ecb91f8ddc..a8264aebf3d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -77,6 +77,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
+ nvbo->bo.base.funcs = &nouveau_gem_object_funcs;
+
/* Initialize the embedded gem-object. We return a single gem-reference
* to the caller, instead of a normal nouveau_bo ttm reference. */
ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 806d9ec310f5..a2e23fd4906c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -5,12 +5,13 @@
#include "nouveau_drv.h"
#include "nouveau_mem.h"
#include "nouveau_ttm.h"
+#include "nouveau_bo.h"
struct nouveau_sgdma_be {
/* this has to be the first field so populate/unpopulate in
* nouveau_bo.c work properly; otherwise we have to move them here
*/
- struct ttm_dma_tt ttm;
+ struct ttm_tt ttm;
struct nouveau_mem *mem;
};
@@ -22,7 +23,7 @@ nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
if (ttm) {
nouveau_sgdma_unbind(bdev, ttm);
ttm_tt_destroy_common(bdev, ttm);
- ttm_dma_tt_fini(&nvbe->ttm);
+ ttm_tt_fini(&nvbe->ttm);
kfree(nvbe);
}
}
@@ -67,15 +68,25 @@ nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
{
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_sgdma_be *nvbe;
+ enum ttm_caching caching;
+
+ if (nvbo->force_coherent)
+ caching = ttm_uncached;
+ else if (drm->agp.bridge)
+ caching = ttm_write_combined;
+ else
+ caching = ttm_cached;
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
if (!nvbe)
return NULL;
- if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) {
+ if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags, caching)) {
kfree(nvbe);
return NULL;
}
- return &nvbe->ttm.ttm;
+ return &nvbe->ttm;
}
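With per-page caching flags gone from TTM, the cache attribute is fixed once, when the ttm_tt is created, as the hunk above shows for nouveau's three cases. A small sketch of the decision, assuming the 5.11 ttm_caching enum:

/* Sketch: map the buffer's requirements onto the three cache states a
 * ttm_tt can be created with (ttm_cached / ttm_write_combined /
 * ttm_uncached in the 5.11 headers).
 */
static enum ttm_caching example_pick_caching(bool force_coherent, bool agp)
{
	if (force_coherent)
		return ttm_uncached;	   /* CPU must observe device writes */
	if (agp)
		return ttm_write_combined; /* WC mapping for the AGP aperture */
	return ttm_cached;		   /* normal cacheable system memory */
}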
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 427341753441..0592ed6eaad1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -108,7 +108,7 @@ nv04_gart_manager_new(struct ttm_resource_manager *man,
return ret;
ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0,
- reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
+ (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
if (ret) {
nouveau_mem_del(reg);
return ret;
@@ -134,17 +134,19 @@ static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
if (ret)
return ret;
- nouveau_bo_del_io_reserve_lru(bo);
+ ret = nouveau_ttm_fault_reserve_notify(bo);
+ if (ret)
+ goto error_unlock;
+ nouveau_bo_del_io_reserve_lru(bo);
prot = vm_get_page_prot(vma->vm_flags);
ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+ nouveau_bo_add_io_reserve_lru(bo);
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
return ret;
- nouveau_bo_add_io_reserve_lru(bo);
-
+error_unlock:
dma_resv_unlock(bo->base.resv);
-
return ret;
}
@@ -220,7 +222,7 @@ nouveau_ttm_fini_vram(struct nouveau_drm *drm)
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ttm_resource_manager_set_used(man, false);
- ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man);
+ ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL);
kfree(man);
@@ -265,7 +267,7 @@ nouveau_ttm_fini_gtt(struct nouveau_drm *drm)
ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT);
else {
ttm_resource_manager_set_used(man, false);
- ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man);
+ ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
ttm_resource_manager_cleanup(man);
ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL);
kfree(man);
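With fault_reserve_notify removed from the TTM callback table, nouveau's own vm fault handler sequences the steps itself, as the first hunk of this file shows. A reduced sketch of the shape such a handler takes with the 5.11 TTM helpers (illustrative, not the literal nouveau code):

static vm_fault_t example_ttm_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	pgprot_t prot;
	vm_fault_t ret;

	ret = ttm_bo_vm_reserve(bo, vmf);	/* takes bo->base.resv */
	if (ret)
		return ret;

	prot = vm_get_page_prot(vmf->vma->vm_flags);
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;	/* the helper already dropped the locks here */

	dma_resv_unlock(bo->base.resv);
	return ret;
}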
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 328a4a74f534..fef3b0032fd8 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -436,7 +436,7 @@ static void omap_crtc_arm_event(struct drm_crtc *crtc)
}
static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -462,7 +462,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void omap_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 53d5e184ee77..2e598b8b72af 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -521,12 +521,6 @@ static int dev_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-static const struct vm_operations_struct omap_gem_vm_ops = {
- .fault = omap_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct file_operations omapdriver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -549,10 +543,7 @@ static struct drm_driver omap_drm_driver = {
#endif
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = omap_gem_prime_export,
.gem_prime_import = omap_gem_prime_import,
- .gem_free_object_unlocked = omap_gem_free_object,
- .gem_vm_ops = &omap_gem_vm_ops,
.dumb_create = omap_gem_dumb_create,
.dumb_map_offset = omap_gem_dumb_map_offset,
.ioctls = ioctls,
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index f67f223c6479..d8e09792793a 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -487,7 +487,7 @@ static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj,
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
*/
-vm_fault_t omap_gem_fault(struct vm_fault *vmf)
+static vm_fault_t omap_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
@@ -1089,7 +1089,7 @@ void omap_gem_describe_objects(struct list_head *list, struct seq_file *m)
* Constructor & Destructor
*/
-void omap_gem_free_object(struct drm_gem_object *obj)
+static void omap_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct omap_drm_private *priv = dev->dev_private;
@@ -1169,6 +1169,18 @@ static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags)
return true;
}
+static const struct vm_operations_struct omap_gem_vm_ops = {
+ .fault = omap_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs omap_gem_object_funcs = {
+ .free = omap_gem_free_object,
+ .export = omap_gem_prime_export,
+ .vm_ops = &omap_gem_vm_ops,
+};
+
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
union omap_gem_size gsize, u32 flags)
@@ -1236,6 +1248,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
size = PAGE_ALIGN(gsize.bytes);
}
+ obj->funcs = &omap_gem_object_funcs;
+
/* Initialize the GEM object. */
if (!(flags & OMAP_BO_MEM_SHMEM)) {
drm_gem_private_object_init(dev, obj, size);
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.h b/drivers/gpu/drm/omapdrm/omap_gem.h
index 729b7812a815..eda9b4839c30 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.h
+++ b/drivers/gpu/drm/omapdrm/omap_gem.h
@@ -48,7 +48,6 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
struct sg_table *sgt);
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
union omap_gem_size gsize, u32 flags, u32 *handle);
-void omap_gem_free_object(struct drm_gem_object *obj);
void *omap_gem_vaddr(struct drm_gem_object *obj);
/* Dumb Buffers Interface */
@@ -69,7 +68,6 @@ struct dma_buf *omap_gem_prime_export(struct drm_gem_object *obj, int flags);
struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
struct dma_buf *buffer);
-vm_fault_t omap_gem_fault(struct vm_fault *vmf);
int omap_gem_roll(struct drm_gem_object *obj, u32 roll);
void omap_gem_cpu_sync_page(struct drm_gem_object *obj, int pgoff);
void omap_gem_dma_sync_buffer(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index b9dbedf8f15e..e386524b2d77 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -208,6 +208,16 @@ config DRM_PANEL_NOVATEK_NT35510
around the Novatek NT35510 display controller, such as some
Hydis panels.
+config DRM_PANEL_NOVATEK_NT36672A
+ tristate "Novatek NT36672A DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the panels built
+ around the Novatek NT36672A display controller, such as the
+ Tianma panels used in some variants of the Xiaomi Poco F1 phone.
+
config DRM_PANEL_NOVATEK_NT39016
tristate "Novatek NT39016 RGB/SPI panel"
depends on OF && SPI
@@ -450,6 +460,17 @@ config DRM_PANEL_SONY_ACX565AKM
Say Y here if you want to enable support for the Sony ACX565AKM
800x600 3.5" panel (found on the Nokia N900).
+config DRM_PANEL_TDO_TL070WSH30
+ tristate "TDO TL070WSH30 DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the TDO TL070WSH30
+ TFT-LCD panel module. The panel has a 1024×600 resolution and
+ uses 24-bit RGB per pixel. It provides a MIPI DSI interface to
+ the host and has a built-in LED backlight and touch controller.
+
config DRM_PANEL_TPO_TD028TTEC1
tristate "Toppoly (TPO) TD028TTEC1 panel driver"
depends on OF && SPI
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 2ba560bca61d..d1f8cc572f37 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
@@ -47,6 +48,7 @@ obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7703) += panel-sitronix-st7703.o
obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
obj-$(CONFIG_DRM_PANEL_SONY_ACX424AKP) += panel-sony-acx424akp.o
obj-$(CONFIG_DRM_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
+obj-$(CONFIG_DRM_PANEL_TDO_TL070WSH30) += panel-tdo-tl070wsh30.o
obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
new file mode 100644
index 000000000000..533cd3934b8b
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c
@@ -0,0 +1,711 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Linaro Ltd
+ * Author: Sumit Semwal <sumit.semwal@linaro.org>
+ *
+ * This driver is for the DSI interface to panels using the NT36672A display driver IC
+ * from Novatek.
+ * Currently supported are the Tianma FHD+ panels found in some Xiaomi phones, including
+ * some variants of the Poco F1 phone.
+ *
+ * Panels using the Novatek NT36672A IC should add the appropriate per-panel
+ * configuration and use this driver.
+ */
+
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct nt36672a_panel_cmd {
+ const char data[2];
+};
+
+static const char * const nt36672a_regulator_names[] = {
+ "vddio",
+ "vddpos",
+ "vddneg",
+};
+
+static unsigned long const nt36672a_regulator_enable_loads[] = {
+ 62000,
+ 100000,
+ 100000
+};
+
+struct nt36672a_panel_desc {
+ const struct drm_display_mode *display_mode;
+ const char *panel_name;
+
+ unsigned int width_mm;
+ unsigned int height_mm;
+
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ unsigned int lanes;
+
+ unsigned int num_on_cmds_1;
+ const struct nt36672a_panel_cmd *on_cmds_1;
+ unsigned int num_on_cmds_2;
+ const struct nt36672a_panel_cmd *on_cmds_2;
+
+ unsigned int num_off_cmds;
+ const struct nt36672a_panel_cmd *off_cmds;
+};
+
+struct nt36672a_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *link;
+ const struct nt36672a_panel_desc *desc;
+
+ struct regulator_bulk_data supplies[ARRAY_SIZE(nt36672a_regulator_names)];
+
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+};
+
+static inline struct nt36672a_panel *to_nt36672a_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct nt36672a_panel, base);
+}
+
+static int nt36672a_send_cmds(struct drm_panel *panel, const struct nt36672a_panel_cmd *cmds,
+ int num)
+{
+ struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < num; i++) {
+ const struct nt36672a_panel_cmd *cmd = &cmds[i];
+
+ err = mipi_dsi_dcs_write(pinfo->link, cmd->data[0], cmd->data + 1, 1);
+
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int nt36672a_panel_power_off(struct drm_panel *panel)
+{
+ struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
+ int ret = 0;
+
+ gpiod_set_value(pinfo->reset_gpio, 1);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(pinfo->supplies), pinfo->supplies);
+ if (ret)
+ dev_err(panel->dev, "regulator_bulk_disable failed %d\n", ret);
+
+ return ret;
+}
+
+static int nt36672a_panel_unprepare(struct drm_panel *panel)
+{
+ struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
+ int ret;
+
+ if (!pinfo->prepared)
+ return 0;
+
+ /* send off cmds */
+ ret = nt36672a_send_cmds(panel, pinfo->desc->off_cmds,
+ pinfo->desc->num_off_cmds);
+
+ if (ret < 0)
+ dev_err(panel->dev, "failed to send DCS off cmds: %d\n", ret);
+
+ ret = mipi_dsi_dcs_set_display_off(pinfo->link);
+ if (ret < 0)
+ dev_err(panel->dev, "set_display_off cmd failed ret = %d\n", ret);
+
+ /* 120ms delay required here as per DCS spec */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(pinfo->link);
+ if (ret < 0)
+ dev_err(panel->dev, "enter_sleep cmd failed ret = %d\n", ret);
+
+ /* 0x3C = 60ms delay */
+ msleep(60);
+
+ ret = nt36672a_panel_power_off(panel);
+ if (ret < 0)
+ dev_err(panel->dev, "power_off failed ret = %d\n", ret);
+
+ pinfo->prepared = false;
+
+ return ret;
+}
+
+static int nt36672a_panel_power_on(struct nt36672a_panel *pinfo)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(pinfo->supplies), pinfo->supplies);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Per the downstream kernel, the reset sequence of the Tianma FHD panel
+ * requires the panel to be out of reset for 10ms, then held in reset for
+ * 10ms. For Android AOSP we had to bump both delays up to 200ms, since a
+ * white screen sometimes appears otherwise.
+ * FIXME: Try to reduce this 200ms to a smaller value.
+ */
+ gpiod_set_value(pinfo->reset_gpio, 1);
+ msleep(200);
+ gpiod_set_value(pinfo->reset_gpio, 0);
+ msleep(200);
+
+ return 0;
+}
+
+static int nt36672a_panel_prepare(struct drm_panel *panel)
+{
+ struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
+ int err;
+
+ if (pinfo->prepared)
+ return 0;
+
+ err = nt36672a_panel_power_on(pinfo);
+ if (err < 0)
+ goto poweroff;
+
+ /* send first part of init cmds */
+ err = nt36672a_send_cmds(panel, pinfo->desc->on_cmds_1,
+ pinfo->desc->num_on_cmds_1);
+
+ if (err < 0) {
+ dev_err(panel->dev, "failed to send DCS Init 1st Code: %d\n", err);
+ goto poweroff;
+ }
+
+ err = mipi_dsi_dcs_exit_sleep_mode(pinfo->link);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
+ goto poweroff;
+ }
+
+ /* 0x46 = 70 ms delay */
+ msleep(70);
+
+ err = mipi_dsi_dcs_set_display_on(pinfo->link);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to Set Display ON: %d\n", err);
+ goto poweroff;
+ }
+
+ /* Send rest of the init cmds */
+ err = nt36672a_send_cmds(panel, pinfo->desc->on_cmds_2,
+ pinfo->desc->num_on_cmds_2);
+
+ if (err < 0) {
+ dev_err(panel->dev, "failed to send DCS Init 2nd Code: %d\n", err);
+ goto poweroff;
+ }
+
+ msleep(120);
+
+ pinfo->prepared = true;
+
+ return 0;
+
+poweroff:
+ gpiod_set_value(pinfo->reset_gpio, 0);
+ return err;
+}
+
+static int nt36672a_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct nt36672a_panel *pinfo = to_nt36672a_panel(panel);
+ const struct drm_display_mode *m = pinfo->desc->display_mode;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, m);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n", m->hdisplay,
+ m->vdisplay, drm_mode_vrefresh(m));
+ return -ENOMEM;
+ }
+
+ connector->display_info.width_mm = pinfo->desc->width_mm;
+ connector->display_info.height_mm = pinfo->desc->height_mm;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs panel_funcs = {
+ .unprepare = nt36672a_panel_unprepare,
+ .prepare = nt36672a_panel_prepare,
+ .get_modes = nt36672a_panel_get_modes,
+};
+
+static const struct nt36672a_panel_cmd tianma_fhd_video_on_cmds_1[] = {
+ /* skin enhancement mode */
+ { .data = {0xFF, 0x22} },
+ { .data = {0x00, 0x40} },
+ { .data = {0x01, 0xC0} },
+ { .data = {0x02, 0x40} },
+ { .data = {0x03, 0x40} },
+ { .data = {0x04, 0x40} },
+ { .data = {0x05, 0x40} },
+ { .data = {0x06, 0x40} },
+ { .data = {0x07, 0x40} },
+ { .data = {0x08, 0x40} },
+ { .data = {0x09, 0x40} },
+ { .data = {0x0A, 0x40} },
+ { .data = {0x0B, 0x40} },
+ { .data = {0x0C, 0x40} },
+ { .data = {0x0D, 0x40} },
+ { .data = {0x0E, 0x40} },
+ { .data = {0x0F, 0x40} },
+ { .data = {0x10, 0x40} },
+ { .data = {0x11, 0x50} },
+ { .data = {0x12, 0x60} },
+ { .data = {0x13, 0x70} },
+ { .data = {0x14, 0x58} },
+ { .data = {0x15, 0x68} },
+ { .data = {0x16, 0x78} },
+ { .data = {0x17, 0x77} },
+ { .data = {0x18, 0x39} },
+ { .data = {0x19, 0x2D} },
+ { .data = {0x1A, 0x2E} },
+ { .data = {0x1B, 0x32} },
+ { .data = {0x1C, 0x37} },
+ { .data = {0x1D, 0x3A} },
+ { .data = {0x1E, 0x40} },
+ { .data = {0x1F, 0x40} },
+ { .data = {0x20, 0x40} },
+ { .data = {0x21, 0x40} },
+ { .data = {0x22, 0x40} },
+ { .data = {0x23, 0x40} },
+ { .data = {0x24, 0x40} },
+ { .data = {0x25, 0x40} },
+ { .data = {0x26, 0x40} },
+ { .data = {0x27, 0x40} },
+ { .data = {0x28, 0x40} },
+ { .data = {0x2D, 0x00} },
+ { .data = {0x2F, 0x40} },
+ { .data = {0x30, 0x40} },
+ { .data = {0x31, 0x40} },
+ { .data = {0x32, 0x40} },
+ { .data = {0x33, 0x40} },
+ { .data = {0x34, 0x40} },
+ { .data = {0x35, 0x40} },
+ { .data = {0x36, 0x40} },
+ { .data = {0x37, 0x40} },
+ { .data = {0x38, 0x40} },
+ { .data = {0x39, 0x40} },
+ { .data = {0x3A, 0x40} },
+ { .data = {0x3B, 0x40} },
+ { .data = {0x3D, 0x40} },
+ { .data = {0x3F, 0x40} },
+ { .data = {0x40, 0x40} },
+ { .data = {0x41, 0x40} },
+ { .data = {0x42, 0x40} },
+ { .data = {0x43, 0x40} },
+ { .data = {0x44, 0x40} },
+ { .data = {0x45, 0x40} },
+ { .data = {0x46, 0x40} },
+ { .data = {0x47, 0x40} },
+ { .data = {0x48, 0x40} },
+ { .data = {0x49, 0x40} },
+ { .data = {0x4A, 0x40} },
+ { .data = {0x4B, 0x40} },
+ { .data = {0x4C, 0x40} },
+ { .data = {0x4D, 0x40} },
+ { .data = {0x4E, 0x40} },
+ { .data = {0x4F, 0x40} },
+ { .data = {0x50, 0x40} },
+ { .data = {0x51, 0x40} },
+ { .data = {0x52, 0x40} },
+ { .data = {0x53, 0x01} },
+ { .data = {0x54, 0x01} },
+ { .data = {0x55, 0xFE} },
+ { .data = {0x56, 0x77} },
+ { .data = {0x58, 0xCD} },
+ { .data = {0x59, 0xD0} },
+ { .data = {0x5A, 0xD0} },
+ { .data = {0x5B, 0x50} },
+ { .data = {0x5C, 0x50} },
+ { .data = {0x5D, 0x50} },
+ { .data = {0x5E, 0x50} },
+ { .data = {0x5F, 0x50} },
+ { .data = {0x60, 0x50} },
+ { .data = {0x61, 0x50} },
+ { .data = {0x62, 0x50} },
+ { .data = {0x63, 0x50} },
+ { .data = {0x64, 0x50} },
+ { .data = {0x65, 0x50} },
+ { .data = {0x66, 0x50} },
+ { .data = {0x67, 0x50} },
+ { .data = {0x68, 0x50} },
+ { .data = {0x69, 0x50} },
+ { .data = {0x6A, 0x50} },
+ { .data = {0x6B, 0x50} },
+ { .data = {0x6C, 0x50} },
+ { .data = {0x6D, 0x50} },
+ { .data = {0x6E, 0x50} },
+ { .data = {0x6F, 0x50} },
+ { .data = {0x70, 0x07} },
+ { .data = {0x71, 0x00} },
+ { .data = {0x72, 0x00} },
+ { .data = {0x73, 0x00} },
+ { .data = {0x74, 0x06} },
+ { .data = {0x75, 0x0C} },
+ { .data = {0x76, 0x03} },
+ { .data = {0x77, 0x09} },
+ { .data = {0x78, 0x0F} },
+ { .data = {0x79, 0x68} },
+ { .data = {0x7A, 0x88} },
+ { .data = {0x7C, 0x80} },
+ { .data = {0x7D, 0x80} },
+ { .data = {0x7E, 0x80} },
+ { .data = {0x7F, 0x00} },
+ { .data = {0x80, 0x00} },
+ { .data = {0x81, 0x00} },
+ { .data = {0x83, 0x01} },
+ { .data = {0x84, 0x00} },
+ { .data = {0x85, 0x80} },
+ { .data = {0x86, 0x80} },
+ { .data = {0x87, 0x80} },
+ { .data = {0x88, 0x40} },
+ { .data = {0x89, 0x91} },
+ { .data = {0x8A, 0x98} },
+ { .data = {0x8B, 0x80} },
+ { .data = {0x8C, 0x80} },
+ { .data = {0x8D, 0x80} },
+ { .data = {0x8E, 0x80} },
+ { .data = {0x8F, 0x80} },
+ { .data = {0x90, 0x80} },
+ { .data = {0x91, 0x80} },
+ { .data = {0x92, 0x80} },
+ { .data = {0x93, 0x80} },
+ { .data = {0x94, 0x80} },
+ { .data = {0x95, 0x80} },
+ { .data = {0x96, 0x80} },
+ { .data = {0x97, 0x80} },
+ { .data = {0x98, 0x80} },
+ { .data = {0x99, 0x80} },
+ { .data = {0x9A, 0x80} },
+ { .data = {0x9B, 0x80} },
+ { .data = {0x9C, 0x80} },
+ { .data = {0x9D, 0x80} },
+ { .data = {0x9E, 0x80} },
+ { .data = {0x9F, 0x80} },
+ { .data = {0xA0, 0x8A} },
+ { .data = {0xA2, 0x80} },
+ { .data = {0xA6, 0x80} },
+ { .data = {0xA7, 0x80} },
+ { .data = {0xA9, 0x80} },
+ { .data = {0xAA, 0x80} },
+ { .data = {0xAB, 0x80} },
+ { .data = {0xAC, 0x80} },
+ { .data = {0xAD, 0x80} },
+ { .data = {0xAE, 0x80} },
+ { .data = {0xAF, 0x80} },
+ { .data = {0xB7, 0x76} },
+ { .data = {0xB8, 0x76} },
+ { .data = {0xB9, 0x05} },
+ { .data = {0xBA, 0x0D} },
+ { .data = {0xBB, 0x14} },
+ { .data = {0xBC, 0x0F} },
+ { .data = {0xBD, 0x18} },
+ { .data = {0xBE, 0x1F} },
+ { .data = {0xBF, 0x05} },
+ { .data = {0xC0, 0x0D} },
+ { .data = {0xC1, 0x14} },
+ { .data = {0xC2, 0x03} },
+ { .data = {0xC3, 0x07} },
+ { .data = {0xC4, 0x0A} },
+ { .data = {0xC5, 0xA0} },
+ { .data = {0xC6, 0x55} },
+ { .data = {0xC7, 0xFF} },
+ { .data = {0xC8, 0x39} },
+ { .data = {0xC9, 0x44} },
+ { .data = {0xCA, 0x12} },
+ { .data = {0xCD, 0x80} },
+ { .data = {0xDB, 0x80} },
+ { .data = {0xDC, 0x80} },
+ { .data = {0xDD, 0x80} },
+ { .data = {0xE0, 0x80} },
+ { .data = {0xE1, 0x80} },
+ { .data = {0xE2, 0x80} },
+ { .data = {0xE3, 0x80} },
+ { .data = {0xE4, 0x80} },
+ { .data = {0xE5, 0x40} },
+ { .data = {0xE6, 0x40} },
+ { .data = {0xE7, 0x40} },
+ { .data = {0xE8, 0x40} },
+ { .data = {0xE9, 0x40} },
+ { .data = {0xEA, 0x40} },
+ { .data = {0xEB, 0x40} },
+ { .data = {0xEC, 0x40} },
+ { .data = {0xED, 0x40} },
+ { .data = {0xEE, 0x40} },
+ { .data = {0xEF, 0x40} },
+ { .data = {0xF0, 0x40} },
+ { .data = {0xF1, 0x40} },
+ { .data = {0xF2, 0x40} },
+ { .data = {0xF3, 0x40} },
+ { .data = {0xF4, 0x40} },
+ { .data = {0xF5, 0x40} },
+ { .data = {0xF6, 0x40} },
+ { .data = {0xFB, 0x1} },
+ { .data = {0xFF, 0x23} },
+ { .data = {0xFB, 0x01} },
+ /* dimming enable */
+ { .data = {0x01, 0x84} },
+ { .data = {0x05, 0x2D} },
+ { .data = {0x06, 0x00} },
+ /* resolution 1080x2246 */
+ { .data = {0x11, 0x01} },
+ { .data = {0x12, 0x7B} },
+ { .data = {0x15, 0x6F} },
+ { .data = {0x16, 0x0B} },
+ /* UI mode */
+ { .data = {0x29, 0x0A} },
+ { .data = {0x30, 0xFF} },
+ { .data = {0x31, 0xFF} },
+ { .data = {0x32, 0xFF} },
+ { .data = {0x33, 0xFF} },
+ { .data = {0x34, 0xFF} },
+ { .data = {0x35, 0xFF} },
+ { .data = {0x36, 0xFF} },
+ { .data = {0x37, 0xFF} },
+ { .data = {0x38, 0xFC} },
+ { .data = {0x39, 0xF8} },
+ { .data = {0x3A, 0xF4} },
+ { .data = {0x3B, 0xF1} },
+ { .data = {0x3D, 0xEE} },
+ { .data = {0x3F, 0xEB} },
+ { .data = {0x40, 0xE8} },
+ { .data = {0x41, 0xE5} },
+ /* STILL mode */
+ { .data = {0x2A, 0x13} },
+ { .data = {0x45, 0xFF} },
+ { .data = {0x46, 0xFF} },
+ { .data = {0x47, 0xFF} },
+ { .data = {0x48, 0xFF} },
+ { .data = {0x49, 0xFF} },
+ { .data = {0x4A, 0xFF} },
+ { .data = {0x4B, 0xFF} },
+ { .data = {0x4C, 0xFF} },
+ { .data = {0x4D, 0xED} },
+ { .data = {0x4E, 0xD5} },
+ { .data = {0x4F, 0xBF} },
+ { .data = {0x50, 0xA6} },
+ { .data = {0x51, 0x96} },
+ { .data = {0x52, 0x86} },
+ { .data = {0x53, 0x76} },
+ { .data = {0x54, 0x66} },
+ /* MOVING mode */
+ { .data = {0x2B, 0x0E} },
+ { .data = {0x58, 0xFF} },
+ { .data = {0x59, 0xFF} },
+ { .data = {0x5A, 0xFF} },
+ { .data = {0x5B, 0xFF} },
+ { .data = {0x5C, 0xFF} },
+ { .data = {0x5D, 0xFF} },
+ { .data = {0x5E, 0xFF} },
+ { .data = {0x5F, 0xFF} },
+ { .data = {0x60, 0xF6} },
+ { .data = {0x61, 0xEA} },
+ { .data = {0x62, 0xE1} },
+ { .data = {0x63, 0xD8} },
+ { .data = {0x64, 0xCE} },
+ { .data = {0x65, 0xC3} },
+ { .data = {0x66, 0xBA} },
+ { .data = {0x67, 0xB3} },
+ { .data = {0xFF, 0x25} },
+ { .data = {0xFB, 0x01} },
+ { .data = {0x05, 0x04} },
+ { .data = {0xFF, 0x26} },
+ { .data = {0xFB, 0x01} },
+ { .data = {0x1C, 0xAF} },
+ { .data = {0xFF, 0x10} },
+ { .data = {0xFB, 0x01} },
+ { .data = {0x51, 0xFF} },
+ { .data = {0x53, 0x24} },
+ { .data = {0x55, 0x00} },
+};
+
+static const struct nt36672a_panel_cmd tianma_fhd_video_on_cmds_2[] = {
+ { .data = {0xFF, 0x24} },
+ { .data = {0xFB, 0x01} },
+ { .data = {0xC3, 0x01} },
+ { .data = {0xC4, 0x54} },
+ { .data = {0xFF, 0x10} },
+};
+
+static const struct nt36672a_panel_cmd tianma_fhd_video_off_cmds[] = {
+ { .data = {0xFF, 0x24} },
+ { .data = {0xFB, 0x01} },
+ { .data = {0xC3, 0x01} },
+ { .data = {0xFF, 0x10} },
+};
+
+static const struct drm_display_mode tianma_fhd_video_panel_default_mode = {
+ .clock = 161331,
+
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 40,
+ .hsync_end = 1080 + 40 + 20,
+ .htotal = 1080 + 40 + 20 + 44,
+
+ .vdisplay = 2246,
+ .vsync_start = 2246 + 15,
+ .vsync_end = 2246 + 15 + 2,
+ .vtotal = 2246 + 15 + 2 + 8,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct nt36672a_panel_desc tianma_fhd_video_panel_desc = {
+ .display_mode = &tianma_fhd_video_panel_default_mode,
+
+ .width_mm = 68,
+ .height_mm = 136,
+
+ .mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO
+ | MIPI_DSI_MODE_VIDEO_HSE
+ | MIPI_DSI_CLOCK_NON_CONTINUOUS
+ | MIPI_DSI_MODE_VIDEO_BURST,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+ .on_cmds_1 = tianma_fhd_video_on_cmds_1,
+ .num_on_cmds_1 = ARRAY_SIZE(tianma_fhd_video_on_cmds_1),
+ .on_cmds_2 = tianma_fhd_video_on_cmds_2,
+ .num_on_cmds_2 = ARRAY_SIZE(tianma_fhd_video_on_cmds_2),
+ .off_cmds = tianma_fhd_video_off_cmds,
+ .num_off_cmds = ARRAY_SIZE(tianma_fhd_video_off_cmds),
+};
+
+static int nt36672a_panel_add(struct nt36672a_panel *pinfo)
+{
+ struct device *dev = &pinfo->link->dev;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++)
+ pinfo->supplies[i].supply = nt36672a_regulator_names[i];
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(pinfo->supplies),
+ pinfo->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ for (i = 0; i < ARRAY_SIZE(pinfo->supplies); i++) {
+ ret = regulator_set_load(pinfo->supplies[i].consumer,
+ nt36672a_regulator_enable_loads[i]);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to set regulator enable loads\n");
+ }
+
+ pinfo->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pinfo->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(pinfo->reset_gpio),
+ "failed to get reset gpio from DT\n");
+
+ drm_panel_init(&pinfo->base, dev, &panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ drm_panel_add(&pinfo->base);
+
+ return 0;
+}
+
+static int nt36672a_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct nt36672a_panel *pinfo;
+ const struct nt36672a_panel_desc *desc;
+ int err;
+
+ pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ desc = of_device_get_match_data(&dsi->dev);
+ dsi->mode_flags = desc->mode_flags;
+ dsi->format = desc->format;
+ dsi->lanes = desc->lanes;
+ pinfo->desc = desc;
+ pinfo->link = dsi;
+
+ mipi_dsi_set_drvdata(dsi, pinfo);
+
+ err = nt36672a_panel_add(pinfo);
+ if (err < 0)
+ return err;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int nt36672a_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct nt36672a_panel *pinfo = mipi_dsi_get_drvdata(dsi);
+ int err;
+
+ err = drm_panel_unprepare(&pinfo->base);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to unprepare panel: %d\n", err);
+
+ err = drm_panel_disable(&pinfo->base);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to disable panel: %d\n", err);
+
+ err = mipi_dsi_detach(dsi);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+ drm_panel_remove(&pinfo->base);
+
+ return 0;
+}
+
+static void nt36672a_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct nt36672a_panel *pinfo = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_disable(&pinfo->base);
+ drm_panel_unprepare(&pinfo->base);
+}
+
+static const struct of_device_id tianma_fhd_video_of_match[] = {
+ { .compatible = "tianma,fhd-video", .data = &tianma_fhd_video_panel_desc },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tianma_fhd_video_of_match);
+
+static struct mipi_dsi_driver nt36672a_panel_driver = {
+ .driver = {
+ .name = "panel-tianma-nt36672a",
+ .of_match_table = tianma_fhd_video_of_match,
+ },
+ .probe = nt36672a_panel_probe,
+ .remove = nt36672a_panel_remove,
+ .shutdown = nt36672a_panel_shutdown,
+};
+module_mipi_dsi_driver(nt36672a_panel_driver);
+
+MODULE_AUTHOR("Sumit Semwal <sumit.semwal@linaro.org>");
+MODULE_DESCRIPTION("NOVATEK NT36672A based MIPI-DSI LCD panel driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index b6e377aa1131..f80b44a8a700 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -99,20 +99,6 @@ static void otm8009a_dcs_write_buf(struct otm8009a *ctx, const void *data,
dev_warn(ctx->dev, "mipi dsi dcs write buffer failed\n");
}
-static void otm8009a_dcs_write_buf_hs(struct otm8009a *ctx, const void *data,
- size_t len)
-{
- struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-
- /* data will be sent in dsi hs mode (ie. no lpm) */
- dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
-
- otm8009a_dcs_write_buf(ctx, data, len);
-
- /* restore back the dsi lpm mode */
- dsi->mode_flags |= MIPI_DSI_MODE_LPM;
-}
-
#define dcs_write_seq(ctx, seq...) \
({ \
static const u8 d[] = { seq }; \
@@ -400,7 +386,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
*/
data[0] = MIPI_DCS_SET_DISPLAY_BRIGHTNESS;
data[1] = bd->props.brightness;
- otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
+ otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
/* set Brightness Control & Backlight on */
data[1] = 0x24;
@@ -412,7 +398,7 @@ static int otm8009a_backlight_update_status(struct backlight_device *bd)
/* Update Brightness Control & Backlight */
data[0] = MIPI_DCS_WRITE_CONTROL_DISPLAY;
- otm8009a_dcs_write_buf_hs(ctx, data, ARRAY_SIZE(data));
+ otm8009a_dcs_write_buf(ctx, data, ARRAY_SIZE(data));
return 0;
}
@@ -452,7 +438,7 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 2;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_LPM;
+ MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index f908eeafb1af..412c0dbcb2b6 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -82,15 +82,15 @@ struct rm68200 {
};
static const struct drm_display_mode default_mode = {
- .clock = 52582,
+ .clock = 54000,
.hdisplay = 720,
- .hsync_start = 720 + 38,
- .hsync_end = 720 + 38 + 8,
- .htotal = 720 + 38 + 8 + 38,
+ .hsync_start = 720 + 48,
+ .hsync_end = 720 + 48 + 9,
+ .htotal = 720 + 48 + 9 + 48,
.vdisplay = 1280,
.vsync_start = 1280 + 12,
- .vsync_end = 1280 + 12 + 4,
- .vtotal = 1280 + 12 + 4 + 12,
+ .vsync_end = 1280 + 12 + 5,
+ .vtotal = 1280 + 12 + 5 + 12,
.flags = 0,
.width_mm = 68,
.height_mm = 122,
@@ -391,7 +391,7 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 2;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
- MIPI_DSI_MODE_LPM;
+ MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs,
DRM_MODE_CONNECTOR_DSI);
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index 535c8d1cca21..a3782830ae3c 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -75,13 +75,8 @@ static int rb070d30_panel_unprepare(struct drm_panel *panel)
static int rb070d30_panel_enable(struct drm_panel *panel)
{
struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
- int ret;
- ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
- if (ret)
- return ret;
-
- return 0;
+ return mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
}
static int rb070d30_panel_disable(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index 1d1c79a18613..0ab1b7ec84cd 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -214,7 +214,7 @@ static const u8 gamma_tbl[S6E3HA2_NUM_GAMMA_STEPS][S6E3HA2_GAMMA_CMD_CNT] = {
0x00, 0x00 }
};
-unsigned char vint_table[S6E3HA2_VINT_STATUS_MAX] = {
+static const unsigned char vint_table[S6E3HA2_VINT_STATUS_MAX] = {
0x18, 0x19, 0x1a, 0x1b, 0x1c,
0x1d, 0x1e, 0x1f, 0x20, 0x21
};
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 2be358fb46f7..8b82ec33f08a 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -3873,6 +3873,32 @@ static const struct panel_desc winstar_wf35ltiacd = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode yes_optoelectronics_ytc700tlag_05_201c_mode = {
+ .clock = 51200,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 100,
+ .hsync_end = 1024 + 100 + 100,
+ .htotal = 1024 + 100 + 100 + 120,
+ .vdisplay = 600,
+ .vsync_start = 600 + 10,
+ .vsync_end = 600 + 10 + 10,
+ .vtotal = 600 + 10 + 10 + 15,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = {
+ .modes = &yes_optoelectronics_ytc700tlag_05_201c_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 154,
+ .height = 90,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode arm_rtsm_mode[] = {
{
.clock = 65000,
@@ -4300,6 +4326,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "winstar,wf35ltiacd",
.data = &winstar_wf35ltiacd,
}, {
+ .compatible = "yes-optoelectronics,ytc700tlag-05-201c",
+ .data = &yes_optoelectronics_ytc700tlag_05_201c,
+ }, {
/* Must be the last entry */
.compatible = "panel-dpi",
.data = &panel_dpi,
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index c22e7c49e077..b30510b1696a 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -153,7 +153,7 @@ static const struct drm_display_mode jh057n00900_mode = {
.height_mm = 130,
};
-struct st7703_panel_desc jh057n00900_panel_desc = {
+static const struct st7703_panel_desc jh057n00900_panel_desc = {
.mode = &jh057n00900_mode,
.lanes = 4,
.mode_flags = MIPI_DSI_MODE_VIDEO |
diff --git a/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c b/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c
new file mode 100644
index 000000000000..820731be7147
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-tdo-tl070wsh30.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 BayLibre, SAS
+ * Author: Neil Armstrong <narmstrong@baylibre.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct tdo_tl070wsh30_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *link;
+
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+};
+
+static inline
+struct tdo_tl070wsh30_panel *to_tdo_tl070wsh30_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct tdo_tl070wsh30_panel, base);
+}
+
+static int tdo_tl070wsh30_panel_prepare(struct drm_panel *panel)
+{
+ struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = to_tdo_tl070wsh30_panel(panel);
+ int err;
+
+ if (tdo_tl070wsh30->prepared)
+ return 0;
+
+ err = regulator_enable(tdo_tl070wsh30->supply);
+ if (err < 0)
+ return err;
+
+ usleep_range(10000, 11000);
+
+ gpiod_set_value_cansleep(tdo_tl070wsh30->reset_gpio, 1);
+
+ usleep_range(10000, 11000);
+
+ gpiod_set_value_cansleep(tdo_tl070wsh30->reset_gpio, 0);
+
+ msleep(200);
+
+ err = mipi_dsi_dcs_exit_sleep_mode(tdo_tl070wsh30->link);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
+ regulator_disable(tdo_tl070wsh30->supply);
+ return err;
+ }
+
+ msleep(200);
+
+ err = mipi_dsi_dcs_set_display_on(tdo_tl070wsh30->link);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to set display on: %d\n", err);
+ regulator_disable(tdo_tl070wsh30->supply);
+ return err;
+ }
+
+ msleep(20);
+
+ tdo_tl070wsh30->prepared = true;
+
+ return 0;
+}
+
+static int tdo_tl070wsh30_panel_unprepare(struct drm_panel *panel)
+{
+ struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = to_tdo_tl070wsh30_panel(panel);
+ int err;
+
+ if (!tdo_tl070wsh30->prepared)
+ return 0;
+
+ err = mipi_dsi_dcs_set_display_off(tdo_tl070wsh30->link);
+ if (err < 0)
+ dev_err(panel->dev, "failed to set display off: %d\n", err);
+
+ usleep_range(10000, 11000);
+
+ err = mipi_dsi_dcs_enter_sleep_mode(tdo_tl070wsh30->link);
+ if (err < 0) {
+ dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);
+ return err;
+ }
+
+ usleep_range(10000, 11000);
+
+ regulator_disable(tdo_tl070wsh30->supply);
+
+ tdo_tl070wsh30->prepared = false;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 47250,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 46,
+ .hsync_end = 1024 + 46 + 80,
+ .htotal = 1024 + 46 + 80 + 100,
+ .vdisplay = 600,
+ .vsync_start = 600 + 5,
+ .vsync_end = 600 + 5 + 5,
+ .vtotal = 600 + 5 + 5 + 20,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static int tdo_tl070wsh30_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ drm_mode_vrefresh(&default_mode));
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = 154;
+ connector->display_info.height_mm = 85;
+ connector->display_info.bpc = 8;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs tdo_tl070wsh30_panel_funcs = {
+ .unprepare = tdo_tl070wsh30_panel_unprepare,
+ .prepare = tdo_tl070wsh30_panel_prepare,
+ .get_modes = tdo_tl070wsh30_panel_get_modes,
+};
+
+static const struct of_device_id tdo_tl070wsh30_of_match[] = {
+ { .compatible = "tdo,tl070wsh30", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tdo_tl070wsh30_of_match);
+
+static int tdo_tl070wsh30_panel_add(struct tdo_tl070wsh30_panel *tdo_tl070wsh30)
+{
+ struct device *dev = &tdo_tl070wsh30->link->dev;
+ int err;
+
+ tdo_tl070wsh30->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(tdo_tl070wsh30->supply))
+ return PTR_ERR(tdo_tl070wsh30->supply);
+
+ tdo_tl070wsh30->reset_gpio = devm_gpiod_get(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(tdo_tl070wsh30->reset_gpio)) {
+ err = PTR_ERR(tdo_tl070wsh30->reset_gpio);
+ dev_dbg(dev, "failed to get reset gpio: %d\n", err);
+ return err;
+ }
+
+ drm_panel_init(&tdo_tl070wsh30->base, &tdo_tl070wsh30->link->dev,
+ &tdo_tl070wsh30_panel_funcs, DRM_MODE_CONNECTOR_DSI);
+
+ err = drm_panel_of_backlight(&tdo_tl070wsh30->base);
+ if (err)
+ return err;
+
+ drm_panel_add(&tdo_tl070wsh30->base);
+
+ return 0;
+}
+
+static int tdo_tl070wsh30_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct tdo_tl070wsh30_panel *tdo_tl070wsh30;
+ int err;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM;
+
+ tdo_tl070wsh30 = devm_kzalloc(&dsi->dev, sizeof(*tdo_tl070wsh30),
+ GFP_KERNEL);
+ if (!tdo_tl070wsh30)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, tdo_tl070wsh30);
+ tdo_tl070wsh30->link = dsi;
+
+ err = tdo_tl070wsh30_panel_add(tdo_tl070wsh30);
+ if (err < 0)
+ return err;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int tdo_tl070wsh30_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = mipi_dsi_get_drvdata(dsi);
+ int err;
+
+ err = mipi_dsi_detach(dsi);
+ if (err < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+ drm_panel_remove(&tdo_tl070wsh30->base);
+ drm_panel_disable(&tdo_tl070wsh30->base);
+ drm_panel_unprepare(&tdo_tl070wsh30->base);
+
+ return 0;
+}
+
+static void tdo_tl070wsh30_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct tdo_tl070wsh30_panel *tdo_tl070wsh30 = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_disable(&tdo_tl070wsh30->base);
+ drm_panel_unprepare(&tdo_tl070wsh30->base);
+}
+
+static struct mipi_dsi_driver tdo_tl070wsh30_panel_driver = {
+ .driver = {
+ .name = "panel-tdo-tl070wsh30",
+ .of_match_table = tdo_tl070wsh30_of_match,
+ },
+ .probe = tdo_tl070wsh30_panel_probe,
+ .remove = tdo_tl070wsh30_panel_remove,
+ .shutdown = tdo_tl070wsh30_panel_shutdown,
+};
+module_mipi_dsi_driver(tdo_tl070wsh30_panel_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("TDO TL070WSH30 panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index 037c14fd6bac..ba0c00d1a001 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -242,13 +242,8 @@ static int td028ttec1_prepare(struct drm_panel *panel)
static int td028ttec1_enable(struct drm_panel *panel)
{
struct td028ttec1_panel *lcd = to_td028ttec1_device(panel);
- int ret;
- ret = jbt_ret_write_0(lcd, JBT_REG_DISPLAY_ON, NULL);
- if (ret)
- return ret;
-
- return 0;
+ return jbt_ret_write_0(lcd, JBT_REG_DISPLAY_ON, NULL);
}
static int td028ttec1_disable(struct drm_panel *panel)
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 8ab025d0035f..913eaa6d0bc6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -29,18 +29,13 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
struct dev_pm_opp *opp;
- int err;
opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
dev_pm_opp_put(opp);
- err = dev_pm_opp_set_rate(dev, *freq);
- if (err)
- return err;
-
- return 0;
+ return dev_pm_opp_set_rate(dev, *freq);
}
static void panfrost_devfreq_reset(struct panfrost_devfreq *pfdevfreq)
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index e6896733838a..ea8d31863c50 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -18,19 +18,13 @@
static int panfrost_reset_init(struct panfrost_device *pfdev)
{
- int err;
-
pfdev->rstc = devm_reset_control_array_get(pfdev->dev, false, true);
if (IS_ERR(pfdev->rstc)) {
dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc));
return PTR_ERR(pfdev->rstc);
}
- err = reset_control_deassert(pfdev->rstc);
- if (err)
- return err;
-
- return 0;
+ return reset_control_deassert(pfdev->rstc);
}
static void panfrost_reset_fini(struct panfrost_device *pfdev)
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 30e7b7196dab..d0469e944143 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -25,7 +25,8 @@
struct panfrost_queue_state {
struct drm_gpu_scheduler sched;
-
+ bool stopped;
+ struct mutex lock;
u64 fence_context;
u64 emit_seqno;
};
@@ -369,6 +370,24 @@ void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
job_write(pfdev, JOB_INT_MASK, irq_mask);
}
+static bool panfrost_scheduler_stop(struct panfrost_queue_state *queue,
+ struct drm_sched_job *bad)
+{
+ bool stopped = false;
+
+ mutex_lock(&queue->lock);
+ if (!queue->stopped) {
+ drm_sched_stop(&queue->sched, bad);
+ if (bad)
+ drm_sched_increase_karma(bad);
+ queue->stopped = true;
+ stopped = true;
+ }
+ mutex_unlock(&queue->lock);
+
+ return stopped;
+}
+
static void panfrost_job_timedout(struct drm_sched_job *sched_job)
{
struct panfrost_job *job = to_panfrost_job(sched_job);
@@ -392,19 +411,39 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
job_read(pfdev, JS_TAIL_LO(js)),
sched_job);
+ /* If the scheduler was already stopped, there's nothing to do. */
+ if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
+ return;
+
if (!mutex_trylock(&pfdev->reset_lock))
return;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
- drm_sched_stop(sched, sched_job);
- if (js != i)
- /* Ensure any timeouts on other slots have finished */
+ /*
+ * If the queue is still active, make sure we wait for any
+ * pending timeouts.
+ */
+ if (!pfdev->js->queue[i].stopped)
cancel_delayed_work_sync(&sched->work_tdr);
- }
- drm_sched_increase_karma(sched_job);
+ /*
+ * If the scheduler was not already stopped, there's a tiny
+ * chance a timeout has expired just before we stopped it, and
+ * drm_sched_stop() does not flush pending work. Let's flush
+ * it now so the timeout handler doesn't get called in the
+ * middle of a reset.
+ */
+ if (panfrost_scheduler_stop(&pfdev->js->queue[i], NULL))
+ cancel_delayed_work_sync(&sched->work_tdr);
+
+ /*
+ * Now that we cancelled the pending timeouts, we can safely
+ * reset the stopped state.
+ */
+ pfdev->js->queue[i].stopped = false;
+ }
spin_lock_irqsave(&pfdev->js->job_lock, flags);
for (i = 0; i < NUM_JOB_SLOTS; i++) {
@@ -421,11 +460,11 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
for (i = 0; i < NUM_JOB_SLOTS; i++)
drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
+ mutex_unlock(&pfdev->reset_lock);
+
/* restart scheduler after GPU is usable again */
for (i = 0; i < NUM_JOB_SLOTS; i++)
drm_sched_start(&pfdev->js->queue[i].sched, true);
-
- mutex_unlock(&pfdev->reset_lock);
}
static const struct drm_sched_backend_ops panfrost_sched_ops = {
@@ -558,6 +597,7 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
int ret, i;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ mutex_init(&js->queue[i].lock);
sched = &js->queue[i].sched;
ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
DRM_SCHED_PRIORITY_NORMAL, &sched,
@@ -570,10 +610,14 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
{
+ struct panfrost_device *pfdev = panfrost_priv->pfdev;
+ struct panfrost_job_slot *js = pfdev->js;
int i;
- for (i = 0; i < NUM_JOB_SLOTS; i++)
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
+ mutex_destroy(&js->queue[i].lock);
+ }
}
int panfrost_job_is_idle(struct panfrost_device *pfdev)
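The timeout handling above hinges on panfrost_scheduler_stop() being a stop-once primitive: only the caller that actually flips the stopped flag owns the subsequent reset, so concurrent timeout handlers on other slots back off. A generic sketch of that idiom, stripped of the scheduler specifics (hypothetical helper, for illustration only):

/* Sketch: the first caller to flip the flag wins and performs the
 * expensive operation; concurrent callers see 'false' and back off.
 */
static bool example_stop_once(struct mutex *lock, bool *stopped)
{
	bool did_stop = false;

	mutex_lock(lock);
	if (!*stopped) {
		*stopped = true;
		did_stop = true;
	}
	mutex_unlock(lock);
	return did_stop;
}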
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index 46b0d1c4a16c..ecef8a2383d2 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -224,15 +224,12 @@ static struct drm_driver pl111_drm_driver = {
.major = 1,
.minor = 0,
.patchlevel = 0,
+ .gem_create_object = drm_gem_cma_create_object_default_funcs,
.dumb_create = drm_gem_cma_dumb_create,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = pl111_gem_import_sg_table,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = pl111_debugfs_init,
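With the driver-wide GEM callbacks removed, pl111 now relies on per-object callbacks: drm_gem_cma_create_object_default_funcs() allocates the CMA object and points its ->funcs at the shared CMA table, which supplies the free, vm_ops, get_sg_table and vmap implementations dropped from struct drm_driver. Going by the CMA helpers in this cycle, the helper amounts to roughly:

struct drm_gem_object *
drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size)
{
        struct drm_gem_cma_object *cma_obj;

        cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
        if (!cma_obj)
                return NULL;

        /* per-object table replaces the removed drm_driver callbacks */
        cma_obj->base.funcs = &drm_gem_cma_default_funcs;

        return &cma_obj->base;
}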
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index 524d35b648d8..183d15e2cf58 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -67,7 +67,7 @@ qxl_debugfs_buffers_info(struct seq_file *m, void *data)
seq_printf(m, "size %ld, pc %d, num releases %d\n",
(unsigned long)bo->tbo.base.size,
- bo->pin_count, rel);
+ bo->tbo.pin_count, rel);
}
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 6063f3a15329..45fd76e04bdc 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -444,13 +444,13 @@ static const struct drm_framebuffer_funcs qxl_fb_funcs = {
};
static void qxl_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
qxl_crtc_update_monitors_config(crtc, "enable");
}
static void qxl_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
qxl_crtc_update_monitors_config(crtc, "disable");
}
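This is the tree-wide signature change from the core: CRTC atomic enable/disable helpers now receive the full struct drm_atomic_state instead of only the old CRTC state. Drivers that still need the old state look it up explicitly; the same mechanical conversion repeats in the rcar-du, rockchip, sti, stm, sun4i, tegra, tidss and tilcdc hunks below. The canonical pattern:

static void example_crtc_atomic_enable(struct drm_crtc *crtc,
                                       struct drm_atomic_state *state)
{
        struct drm_crtc_state *old_state =
                drm_atomic_get_old_crtc_state(state, crtc);

        /* the new state stays reachable as crtc->state, or via
         * drm_atomic_get_new_crtc_state(state, crtc) */
}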
@@ -768,7 +768,6 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
struct drm_gem_object *obj;
struct qxl_bo *user_bo;
struct qxl_surface surf;
- int ret;
if (!new_state->fb)
return 0;
@@ -804,11 +803,7 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
}
}
- ret = qxl_bo_pin(user_bo);
- if (ret)
- return ret;
-
- return 0;
+ return qxl_bo_pin(user_bo);
}
static void qxl_plane_cleanup_fb(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index aae90a9ee1db..3602e8b34189 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -80,7 +80,6 @@ struct qxl_bo {
struct ttm_place placements[3];
struct ttm_placement placement;
struct ttm_bo_kmap_obj kmap;
- unsigned int pin_count;
void *kptr;
unsigned int map_count;
int type;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 5cea6eea72ab..0bab9ec6adc1 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -326,8 +326,8 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out;
- if (!qobj->pin_count) {
- qxl_ttm_placement_from_domain(qobj, qobj->type, false);
+ if (!qobj->tbo.pin_count) {
+ qxl_ttm_placement_from_domain(qobj, qobj->type);
ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
if (unlikely(ret))
goto out;
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 2bc364412e8b..547d46c14d56 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -51,14 +51,12 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
return false;
}
-void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
+void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
u32 c = 0;
u32 pflag = 0;
unsigned int i;
- if (pinned)
- pflag |= TTM_PL_FLAG_NO_EVICT;
if (qbo->tbo.base.size <= PAGE_SIZE)
pflag |= TTM_PL_FLAG_TOPDOWN;
@@ -66,21 +64,21 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
qbo->placement.busy_placement = qbo->placements;
if (domain == QXL_GEM_DOMAIN_VRAM) {
qbo->placements[c].mem_type = TTM_PL_VRAM;
- qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+ qbo->placements[c++].flags = pflag;
}
if (domain == QXL_GEM_DOMAIN_SURFACE) {
qbo->placements[c].mem_type = TTM_PL_PRIV;
- qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+ qbo->placements[c++].flags = pflag;
qbo->placements[c].mem_type = TTM_PL_VRAM;
- qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
+ qbo->placements[c++].flags = pflag;
}
if (domain == QXL_GEM_DOMAIN_CPU) {
qbo->placements[c].mem_type = TTM_PL_SYSTEM;
- qbo->placements[c++].flags = TTM_PL_MASK_CACHING | pflag;
+ qbo->placements[c++].flags = pflag;
}
if (!c) {
qbo->placements[c].mem_type = TTM_PL_SYSTEM;
- qbo->placements[c++].flags = TTM_PL_MASK_CACHING;
+ qbo->placements[c++].flags = 0;
}
qbo->placement.num_placement = c;
qbo->placement.num_busy_placement = c;
@@ -108,6 +106,7 @@ int qxl_bo_create(struct qxl_device *qdev,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr)
{
+ struct ttm_operation_ctx ctx = { !kernel, false };
struct qxl_bo *bo;
enum ttm_bo_type type;
int r;
@@ -128,18 +127,17 @@ int qxl_bo_create(struct qxl_device *qdev,
}
bo->tbo.base.funcs = &qxl_object_funcs;
bo->type = domain;
- bo->pin_count = pinned ? 1 : 0;
bo->surface_id = 0;
INIT_LIST_HEAD(&bo->list);
if (surf)
bo->surf = *surf;
- qxl_ttm_placement_from_domain(bo, domain, pinned);
+ qxl_ttm_placement_from_domain(bo, domain);
- r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, 0, !kernel, size,
- NULL, NULL, &qxl_ttm_bo_destroy);
+ r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, size, type,
+ &bo->placement, 0, &ctx, size,
+ NULL, NULL, &qxl_ttm_bo_destroy);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(qdev->ddev.dev,
@@ -147,6 +145,9 @@ int qxl_bo_create(struct qxl_device *qdev,
size, domain);
return r;
}
+ if (pinned)
+ ttm_bo_pin(&bo->tbo);
+ ttm_bo_unreserve(&bo->tbo);
*bo_ptr = bo;
return 0;
}
@@ -248,39 +249,22 @@ static int __qxl_bo_pin(struct qxl_bo *bo)
struct drm_device *ddev = bo->tbo.base.dev;
int r;
- if (bo->pin_count) {
- bo->pin_count++;
+ if (bo->tbo.pin_count) {
+ ttm_bo_pin(&bo->tbo);
return 0;
}
- qxl_ttm_placement_from_domain(bo, bo->type, true);
+ qxl_ttm_placement_from_domain(bo, bo->type);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (likely(r == 0)) {
- bo->pin_count = 1;
- }
+ if (likely(r == 0))
+ ttm_bo_pin(&bo->tbo);
if (unlikely(r != 0))
dev_err(ddev->dev, "%p pin failed\n", bo);
return r;
}
-static int __qxl_bo_unpin(struct qxl_bo *bo)
+static void __qxl_bo_unpin(struct qxl_bo *bo)
{
- struct ttm_operation_ctx ctx = { false, false };
- struct drm_device *ddev = bo->tbo.base.dev;
- int r, i;
-
- if (!bo->pin_count) {
- dev_warn(ddev->dev, "%p unpin not necessary\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (unlikely(r != 0))
- dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
- return r;
+ ttm_bo_unpin(&bo->tbo);
}
/*
@@ -314,9 +298,9 @@ int qxl_bo_unpin(struct qxl_bo *bo)
if (r)
return r;
- r = __qxl_bo_unpin(bo);
+ __qxl_bo_unpin(bo);
qxl_bo_unreserve(bo);
- return r;
+ return 0;
}
void qxl_bo_force_delete(struct qxl_device *qdev)
@@ -367,10 +351,16 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
int qxl_surf_evict(struct qxl_device *qdev)
{
- return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV);
+ return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}
int qxl_vram_evict(struct qxl_device *qdev)
{
- return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM);
+ return ttm_resource_manager_evict_all(&qdev->mman.bdev, man);
}
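Two TTM API moves land together in this file: the driver-private pin_count is replaced by the refcount embedded in struct ttm_buffer_object, manipulated through ttm_bo_pin()/ttm_bo_unpin() (both expect the BO reserved), and per-memory-type eviction now goes through the resource manager. A minimal pin sketch under those assumptions:

static int example_pin(struct ttm_buffer_object *bo)
{
        int r;

        r = ttm_bo_reserve(bo, true, false, NULL);
        if (r)
                return r;

        ttm_bo_pin(bo);         /* increments bo->pin_count */
        ttm_bo_unreserve(bo);
        return 0;
}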
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index 6b434e5ef795..09a5c818324d 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -58,29 +58,6 @@ static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
-static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
- bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
- if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS) {
- struct drm_device *ddev = bo->tbo.base.dev;
-
- dev_err(ddev->dev, "%p reserve failed for wait\n",
- bo);
- }
- return r;
- }
- if (mem_type)
- *mem_type = bo->tbo.mem.mem_type;
-
- r = ttm_bo_wait(&bo->tbo, true, no_wait);
- ttm_bo_unreserve(&bo->tbo);
- return r;
-}
-
extern int qxl_bo_create(struct qxl_device *qdev,
unsigned long size,
bool kernel, bool pinned, u32 domain,
@@ -94,7 +71,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
extern void qxl_bo_unref(struct qxl_bo **bo);
extern int qxl_bo_pin(struct qxl_bo *bo);
extern int qxl_bo_unpin(struct qxl_bo *bo);
-extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
+extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 4fae3e393da1..e75e364655b8 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -231,8 +231,8 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
struct ttm_operation_ctx ctx = { true, false };
int ret;
- if (!bo->pin_count) {
- qxl_ttm_placement_from_domain(bo, bo->type, false);
+ if (!bo->tbo.pin_count) {
+ qxl_ttm_placement_from_domain(bo, bo->type);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index fd691fff8394..9609eeb52821 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -56,7 +56,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
- .flags = TTM_PL_MASK_CACHING
+ .flags = 0
};
if (!qxl_ttm_bo_is_qxl_bo(bo)) {
@@ -67,7 +67,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
return;
}
qbo = to_qxl_bo(bo);
- qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
+ qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
*placement = qbo->placement;
}
@@ -83,11 +83,13 @@ int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
case TTM_PL_VRAM:
mem->bus.is_iomem = true;
mem->bus.offset = (mem->start << PAGE_SHIFT) + qdev->vram_base;
+ mem->bus.caching = ttm_cached;
break;
case TTM_PL_PRIV:
mem->bus.is_iomem = true;
mem->bus.offset = (mem->start << PAGE_SHIFT) +
qdev->surfaceram_base;
+ mem->bus.caching = ttm_cached;
break;
default:
return -EINVAL;
@@ -98,59 +100,43 @@ int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
/*
* TTM backend functions.
*/
-struct qxl_ttm_tt {
- struct ttm_tt ttm;
- struct qxl_device *qdev;
- u64 offset;
-};
-
-static int qxl_ttm_backend_bind(struct ttm_bo_device *bdev,
- struct ttm_tt *ttm,
- struct ttm_resource *bo_mem)
-{
- struct qxl_ttm_tt *gtt = (void *)ttm;
-
- gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
- if (!ttm->num_pages) {
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
- ttm->num_pages, bo_mem, ttm);
- }
- /* Not implemented */
- return -1;
-}
-
-static void qxl_ttm_backend_unbind(struct ttm_bo_device *bdev,
- struct ttm_tt *ttm)
-{
- /* Not implemented */
-}
-
static void qxl_ttm_backend_destroy(struct ttm_bo_device *bdev,
struct ttm_tt *ttm)
{
- struct qxl_ttm_tt *gtt = (void *)ttm;
-
ttm_tt_destroy_common(bdev, ttm);
- ttm_tt_fini(&gtt->ttm);
- kfree(gtt);
+ ttm_tt_fini(ttm);
+ kfree(ttm);
}
static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{
- struct qxl_device *qdev;
- struct qxl_ttm_tt *gtt;
+ struct ttm_tt *ttm;
- qdev = qxl_get_qdev(bo->bdev);
- gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
- if (gtt == NULL)
+ ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+ if (ttm == NULL)
return NULL;
- gtt->qdev = qdev;
- if (ttm_tt_init(&gtt->ttm, bo, page_flags)) {
- kfree(gtt);
+ if (ttm_dma_tt_init(ttm, bo, page_flags, ttm_cached)) {
+ kfree(ttm);
return NULL;
}
- return &gtt->ttm;
+ return ttm;
+}
+
+static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_resource *new_mem)
+{
+ struct qxl_bo *qbo;
+ struct qxl_device *qdev;
+
+ if (!qxl_ttm_bo_is_qxl_bo(bo))
+ return;
+ qbo = to_qxl_bo(bo);
+ qdev = to_qxl(qbo->tbo.base.dev);
+
+ if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
+ qxl_surface_evict(qdev, qbo, new_mem ? true : false);
}
static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
@@ -160,43 +146,39 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *old_mem = &bo->mem;
int ret;
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+ qxl_bo_move_notify(bo, evict, new_mem);
+
+ ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
- return ret;
+ goto out;
if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
ttm_bo_move_null(bo, new_mem);
return 0;
}
- return ttm_bo_move_memcpy(bo, ctx, new_mem);
+ ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
+out:
+ if (ret) {
+ swap(*new_mem, bo->mem);
+ qxl_bo_move_notify(bo, false, new_mem);
+ swap(*new_mem, bo->mem);
+ }
+ return ret;
}
-static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
- bool evict,
- struct ttm_resource *new_mem)
+static void qxl_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
- struct qxl_bo *qbo;
- struct qxl_device *qdev;
-
- if (!qxl_ttm_bo_is_qxl_bo(bo))
- return;
- qbo = to_qxl_bo(bo);
- qdev = to_qxl(qbo->tbo.base.dev);
-
- if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
- qxl_surface_evict(qdev, qbo, new_mem ? true : false);
+ qxl_bo_move_notify(bo, false, NULL);
}
static struct ttm_bo_driver qxl_bo_driver = {
.ttm_tt_create = &qxl_ttm_tt_create,
- .ttm_tt_bind = &qxl_ttm_backend_bind,
.ttm_tt_destroy = &qxl_ttm_backend_destroy,
- .ttm_tt_unbind = &qxl_ttm_backend_unbind,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
.move = &qxl_bo_move,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
- .move_notify = &qxl_bo_move_notify,
+ .delete_mem_notify = &qxl_bo_delete_mem_notify,
};
static int qxl_ttm_init_mem_type(struct qxl_device *qdev,
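qxl's move path now calls the new ttm_bo_wait_ctx() helper and replaces its move_notify() with a delete_mem_notify() hook, with the error path undoing the notify by temporarily swapping the resource back. The wrapper itself is just a convenience over ttm_bo_wait(); a sketch, assuming the ttm_operation_ctx layout used above:

static inline int ttm_bo_wait_ctx(struct ttm_buffer_object *bo,
                                  struct ttm_operation_ctx *ctx)
{
        return ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
}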
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a6d8de01194a..5d54bccebd4d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -497,7 +497,6 @@ struct radeon_bo {
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
u32 flags;
- unsigned pin_count;
void *kptr;
u32 tiling_flags;
u32 pitch;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index e0ae911ef427..b79686cf8bdb 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -273,10 +273,7 @@ static void radeon_unpin_work_func(struct work_struct *__work)
/* unpin of the old buffer */
r = radeon_bo_reserve(work->old_rbo, false);
if (likely(r == 0)) {
- r = radeon_bo_unpin(work->old_rbo);
- if (unlikely(r != 0)) {
- DRM_ERROR("failed to unpin buffer after flip\n");
- }
+ radeon_bo_unpin(work->old_rbo);
radeon_bo_unreserve(work->old_rbo);
} else
DRM_ERROR("failed to reserve buffer after flip\n");
@@ -607,9 +604,7 @@ pflip_cleanup:
DRM_ERROR("failed to reserve new rbo in error path\n");
goto cleanup;
}
- if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
- DRM_ERROR("failed to unpin new rbo in error path\n");
- }
+ radeon_bo_unpin(new_rbo);
radeon_bo_unreserve(new_rbo);
cleanup:
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 4cd30613fa1d..65061c949aee 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -124,13 +124,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg);
-void radeon_gem_object_free(struct drm_gem_object *obj);
-int radeon_gem_object_open(struct drm_gem_object *obj,
- struct drm_file *file_priv);
-void radeon_gem_object_close(struct drm_gem_object *obj,
- struct drm_file *file_priv);
-struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
- int flags);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc,
unsigned int flags, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
@@ -145,14 +138,9 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
int radeon_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *,
struct sg_table *sg);
-int radeon_gem_prime_pin(struct drm_gem_object *obj);
-void radeon_gem_prime_unpin(struct drm_gem_object *obj);
-void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
-void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
@@ -550,7 +538,7 @@ long radeon_drm_ioctl(struct file *filp,
}
ret = drm_ioctl(filp, cmd, arg);
-
+
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return ret;
@@ -609,22 +597,13 @@ static struct drm_driver kms_driver = {
.irq_uninstall = radeon_driver_irq_uninstall_kms,
.irq_handler = radeon_driver_irq_handler_kms,
.ioctls = radeon_ioctls_kms,
- .gem_free_object_unlocked = radeon_gem_object_free,
- .gem_open_object = radeon_gem_object_open,
- .gem_close_object = radeon_gem_object_close,
.dumb_create = radeon_mode_dumb_create,
.dumb_map_offset = radeon_mode_dumb_mmap,
.fops = &radeon_driver_kms_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = radeon_gem_prime_export,
- .gem_prime_pin = radeon_gem_prime_pin,
- .gem_prime_unpin = radeon_gem_prime_unpin,
- .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
.gem_prime_import_sg_table = radeon_gem_prime_import_sg_table,
- .gem_prime_vmap = radeon_gem_prime_vmap,
- .gem_prime_vunmap = radeon_gem_prime_vunmap,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index e5c4271e64ed..0ccd7213e41f 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -35,7 +35,17 @@
#include "radeon.h"
-void radeon_gem_object_free(struct drm_gem_object *gobj)
+struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj,
+ int flags);
+struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
+int radeon_gem_prime_pin(struct drm_gem_object *obj);
+void radeon_gem_prime_unpin(struct drm_gem_object *obj);
+void *radeon_gem_prime_vmap(struct drm_gem_object *obj);
+void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
+static const struct drm_gem_object_funcs radeon_gem_object_funcs;
+
+static void radeon_gem_object_free(struct drm_gem_object *gobj)
{
struct radeon_bo *robj = gem_to_radeon_bo(gobj);
@@ -85,6 +95,7 @@ retry:
return r;
}
*obj = &robj->tbo.base;
+ (*obj)->funcs = &radeon_gem_object_funcs;
robj->pid = task_pid_nr(current);
mutex_lock(&rdev->gem.mutex);
@@ -146,7 +157,7 @@ void radeon_gem_fini(struct radeon_device *rdev)
* Call from drm_gem_handle_create which appear in both new and open ioctl
* case.
*/
-int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
struct radeon_bo *rbo = gem_to_radeon_bo(obj);
struct radeon_device *rdev = rbo->rdev;
@@ -176,8 +187,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
return 0;
}
-void radeon_gem_object_close(struct drm_gem_object *obj,
- struct drm_file *file_priv)
+static void radeon_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
{
struct radeon_bo *rbo = gem_to_radeon_bo(obj);
struct radeon_device *rdev = rbo->rdev;
@@ -216,6 +227,18 @@ static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
return r;
}
+static const struct drm_gem_object_funcs radeon_gem_object_funcs = {
+ .free = radeon_gem_object_free,
+ .open = radeon_gem_object_open,
+ .close = radeon_gem_object_close,
+ .export = radeon_gem_prime_export,
+ .pin = radeon_gem_prime_pin,
+ .unpin = radeon_gem_prime_unpin,
+ .get_sg_table = radeon_gem_prime_get_sg_table,
+ .vmap = radeon_gem_prime_vmap,
+ .vunmap = radeon_gem_prime_vunmap,
+};
+
/*
* GEM ioctls.
*/
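radeon follows the same conversion as the other drivers in this series: the free/open/close and prime callbacks move out of struct drm_driver into a static struct drm_gem_object_funcs, and every newly created object is pointed at that table before a handle can be created. Boiled down:

static const struct drm_gem_object_funcs example_gem_funcs = {
        .free = example_gem_free,
        .open = example_gem_open,
        /* ... prime callbacks ... */
};

/* at object-creation time, before drm_gem_handle_create(): */
obj->funcs = &example_gem_funcs;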
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 316e35d3f8a9..ab81e35cb060 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -113,57 +113,29 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
rbo->placements[c].fpfn =
rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
rbo->placements[c].mem_type = TTM_PL_VRAM;
- rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED;
+ rbo->placements[c++].flags = 0;
}
rbo->placements[c].fpfn = 0;
rbo->placements[c].mem_type = TTM_PL_VRAM;
- rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED;
+ rbo->placements[c++].flags = 0;
}
if (domain & RADEON_GEM_DOMAIN_GTT) {
- if (rbo->flags & RADEON_GEM_GTT_UC) {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c].mem_type = TTM_PL_TT;
- rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED;
-
- } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
- (rbo->rdev->flags & RADEON_IS_AGP)) {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c].mem_type = TTM_PL_TT;
- rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED;
- } else {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c].mem_type = TTM_PL_TT;
- rbo->placements[c++].flags = TTM_PL_FLAG_CACHED;
- }
+ rbo->placements[c].fpfn = 0;
+ rbo->placements[c].mem_type = TTM_PL_TT;
+ rbo->placements[c++].flags = 0;
}
if (domain & RADEON_GEM_DOMAIN_CPU) {
- if (rbo->flags & RADEON_GEM_GTT_UC) {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c].mem_type = TTM_PL_SYSTEM;
- rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED;
-
- } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
- rbo->rdev->flags & RADEON_IS_AGP) {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c].mem_type = TTM_PL_SYSTEM;
- rbo->placements[c++].flags = TTM_PL_FLAG_WC |
- TTM_PL_FLAG_UNCACHED;
- } else {
- rbo->placements[c].fpfn = 0;
- rbo->placements[c].mem_type = TTM_PL_SYSTEM;
- rbo->placements[c++].flags = TTM_PL_FLAG_CACHED;
- }
+ rbo->placements[c].fpfn = 0;
+ rbo->placements[c].mem_type = TTM_PL_SYSTEM;
+ rbo->placements[c++].flags = 0;
}
if (!c) {
rbo->placements[c].fpfn = 0;
rbo->placements[c].mem_type = TTM_PL_SYSTEM;
- rbo->placements[c++].flags = TTM_PL_MASK_CACHING;
+ rbo->placements[c++].flags = 0;
}
rbo->placement.num_placement = c;
@@ -334,8 +306,8 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm))
return -EPERM;
- if (bo->pin_count) {
- bo->pin_count++;
+ if (bo->tbo.pin_count) {
+ ttm_bo_pin(&bo->tbo);
if (gpu_addr)
*gpu_addr = radeon_bo_gpu_offset(bo);
@@ -367,13 +339,11 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
else
bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;
-
- bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (likely(r == 0)) {
- bo->pin_count = 1;
+ ttm_bo_pin(&bo->tbo);
if (gpu_addr != NULL)
*gpu_addr = radeon_bo_gpu_offset(bo);
if (domain == RADEON_GEM_DOMAIN_VRAM)
@@ -391,36 +361,22 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}
-int radeon_bo_unpin(struct radeon_bo *bo)
+void radeon_bo_unpin(struct radeon_bo *bo)
{
- struct ttm_operation_ctx ctx = { false, false };
- int r, i;
-
- if (!bo->pin_count) {
- dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
- return 0;
- }
- bo->pin_count--;
- if (bo->pin_count)
- return 0;
- for (i = 0; i < bo->placement.num_placement; i++) {
- bo->placements[i].lpfn = 0;
- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- }
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (likely(r == 0)) {
+ ttm_bo_unpin(&bo->tbo);
+ if (!bo->tbo.pin_count) {
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
bo->rdev->vram_pin_size -= radeon_bo_size(bo);
else
bo->rdev->gart_pin_size -= radeon_bo_size(bo);
- } else {
- dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
}
- return r;
}
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
+ struct ttm_bo_device *bdev = &rdev->mman.bdev;
+ struct ttm_resource_manager *man;
+
/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
#ifndef CONFIG_HIBERNATION
if (rdev->flags & RADEON_IS_IGP) {
@@ -429,7 +385,8 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
return 0;
}
#endif
- return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+ man = ttm_manager_type(bdev, TTM_PL_VRAM);
+ return ttm_resource_manager_evict_all(bdev, man);
}
void radeon_bo_force_delete(struct radeon_device *rdev)
@@ -549,7 +506,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
list_for_each_entry(lobj, head, tv.head) {
struct radeon_bo *bo = lobj->robj;
- if (!bo->pin_count) {
+ if (!bo->tbo.pin_count) {
u32 domain = lobj->preferred_domains;
u32 allowed = lobj->allowed_domains;
u32 current_domain =
@@ -629,7 +586,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)
break;
old_object = reg->bo;
- if (old_object->pin_count == 0)
+ if (old_object->tbo.pin_count == 0)
steal = i;
}
@@ -794,7 +751,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}
-int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct ttm_operation_ctx ctx = { false, false };
struct radeon_device *rdev;
@@ -816,8 +773,8 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return 0;
/* Can't move a pinned BO to visible VRAM */
- if (rbo->pin_count > 0)
- return -EINVAL;
+ if (rbo->tbo.pin_count > 0)
+ return VM_FAULT_SIGBUS;
/* hurrah the memory is not visible ! */
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
@@ -831,34 +788,23 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
r = ttm_bo_validate(bo, &rbo->placement, &ctx);
if (unlikely(r == -ENOMEM)) {
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
- return ttm_bo_validate(bo, &rbo->placement, &ctx);
- } else if (unlikely(r != 0)) {
- return r;
+ r = ttm_bo_validate(bo, &rbo->placement, &ctx);
+ } else if (likely(!r)) {
+ offset = bo->mem.start << PAGE_SHIFT;
+ /* this should never happen */
+ if ((offset + size) > rdev->mc.visible_vram_size)
+ return VM_FAULT_SIGBUS;
}
- offset = bo->mem.start << PAGE_SHIFT;
- /* this should never happen */
- if ((offset + size) > rdev->mc.visible_vram_size)
- return -EINVAL;
+ if (unlikely(r == -EBUSY || r == -ERESTARTSYS))
+ return VM_FAULT_NOPAGE;
+ else if (unlikely(r))
+ return VM_FAULT_SIGBUS;
+ ttm_bo_move_to_lru_tail_unlocked(bo);
return 0;
}
-int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
-{
- int r;
-
- r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
- if (unlikely(r != 0))
- return r;
- if (mem_type)
- *mem_type = bo->tbo.mem.mem_type;
-
- r = ttm_bo_wait(&bo->tbo, true, no_wait);
- ttm_bo_unreserve(&bo->tbo);
- return r;
-}
-
/**
* radeon_bo_fence - add fence to buffer object
*
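ttm_bo_unpin() carries its own balance check (it warns on underflow), which is why radeon_bo_unpin() can become void and only adjust the VRAM/GTT accounting once the last reference drops. A sketch of the intended refcount semantics, assuming the caller holds the reservation as the helpers require:

ttm_bo_pin(&bo->tbo);   /* pin_count 0 -> 1: BO is no longer evictable */
ttm_bo_pin(&bo->tbo);   /* nested pin, 1 -> 2 */
ttm_bo_unpin(&bo->tbo); /* 2 -> 1: still pinned */
ttm_bo_unpin(&bo->tbo); /* 1 -> 0: evictable again; a further unpin warns */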
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 44b47241ee42..d606e9a935e3 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -133,9 +133,6 @@ static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}
-extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
- bool no_wait);
-
extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u32 flags,
@@ -149,7 +146,7 @@ extern void radeon_bo_unref(struct radeon_bo **bo);
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
u64 max_offset, u64 *gpu_addr);
-extern int radeon_bo_unpin(struct radeon_bo *bo);
+extern void radeon_bo_unpin(struct radeon_bo *bo);
extern int radeon_bo_evict_vram(struct radeon_device *rdev);
extern void radeon_bo_force_delete(struct radeon_device *rdev);
extern int radeon_bo_init(struct radeon_device *rdev);
@@ -166,7 +163,7 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *new_mem);
-extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
bool shared);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 36150b7f31a9..0a6d7ea847db 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -59,6 +59,8 @@ static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem);
+static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
+ struct ttm_tt *ttm);
struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
@@ -89,7 +91,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
- .flags = TTM_PL_MASK_CACHING
+ .flags = 0
};
struct radeon_bo *rbo;
@@ -151,7 +153,7 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
}
static int radeon_move_blit(struct ttm_buffer_object *bo,
- bool evict, bool no_wait_gpu,
+ bool evict,
struct ttm_resource *new_mem,
struct ttm_resource *old_mem)
{
@@ -206,11 +208,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
}
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
+ bool evict,
+ struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem)
{
- struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
struct ttm_resource *old_mem = &bo->mem;
struct ttm_resource tmp_mem;
struct ttm_place placements;
@@ -226,18 +227,13 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
placements.fpfn = 0;
placements.lpfn = 0;
placements.mem_type = TTM_PL_TT;
- placements.flags = TTM_PL_MASK_CACHING;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
+ placements.flags = 0;
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
return r;
}
- r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
- if (unlikely(r)) {
- goto out_cleanup;
- }
-
- r = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+ r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -246,22 +242,27 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
+ r = radeon_move_blit(bo, true, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, &ctx, new_mem);
+ r = ttm_bo_wait_ctx(bo, ctx);
+ if (unlikely(r))
+ goto out_cleanup;
+
+ radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
+ ttm_resource_free(bo, &bo->mem);
+ ttm_bo_assign_mem(bo, new_mem);
out_cleanup:
ttm_resource_free(bo, &tmp_mem);
return r;
}
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
+ bool evict,
+ struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem)
{
- struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
struct ttm_resource *old_mem = &bo->mem;
struct ttm_resource tmp_mem;
struct ttm_placement placement;
@@ -277,16 +278,22 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
placements.fpfn = 0;
placements.lpfn = 0;
placements.mem_type = TTM_PL_TT;
- placements.flags = TTM_PL_MASK_CACHING;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx);
+ placements.flags = 0;
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem);
- if (unlikely(r)) {
+
+ r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+ if (unlikely(r))
goto out_cleanup;
- }
- r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
+
+ r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem);
+ if (unlikely(r))
+ goto out_cleanup;
+
+ ttm_bo_assign_mem(bo, &tmp_mem);
+ r = radeon_move_blit(bo, true, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -304,13 +311,20 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *old_mem = &bo->mem;
int r;
- r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+ if (new_mem->mem_type == TTM_PL_TT) {
+ r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem);
+ if (r)
+ return r;
+ }
+ radeon_bo_move_notify(bo, evict, new_mem);
+
+ r = ttm_bo_wait_ctx(bo, ctx);
if (r)
- return r;
+ goto fail;
/* Can't move a pinned BO */
rbo = container_of(bo, struct radeon_bo, tbo);
- if (WARN_ON_ONCE(rbo->pin_count > 0))
+ if (WARN_ON_ONCE(rbo->tbo.pin_count > 0))
return -EINVAL;
rdev = radeon_get_rdev(bo->bdev);
@@ -318,14 +332,19 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
ttm_bo_move_null(bo, new_mem);
return 0;
}
- if ((old_mem->mem_type == TTM_PL_TT &&
- new_mem->mem_type == TTM_PL_SYSTEM) ||
- (old_mem->mem_type == TTM_PL_SYSTEM &&
- new_mem->mem_type == TTM_PL_TT)) {
- /* bind is enough */
+ if (old_mem->mem_type == TTM_PL_SYSTEM &&
+ new_mem->mem_type == TTM_PL_TT) {
ttm_bo_move_null(bo, new_mem);
return 0;
}
+
+ if (old_mem->mem_type == TTM_PL_TT &&
+ new_mem->mem_type == TTM_PL_SYSTEM) {
+ radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
+ ttm_resource_free(bo, &bo->mem);
+ ttm_bo_assign_mem(bo, new_mem);
+ return 0;
+ }
if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
rdev->asic->copy.copy == NULL) {
/* use memcpy */
@@ -334,14 +353,12 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
- r = radeon_move_vram_ram(bo, evict, ctx->interruptible,
- ctx->no_wait_gpu, new_mem);
+ r = radeon_move_vram_ram(bo, evict, ctx, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
- r = radeon_move_ram_vram(bo, evict, ctx->interruptible,
- ctx->no_wait_gpu, new_mem);
+ r = radeon_move_ram_vram(bo, evict, ctx, new_mem);
} else {
- r = radeon_move_blit(bo, evict, ctx->no_wait_gpu,
+ r = radeon_move_blit(bo, evict,
new_mem, old_mem);
}
@@ -349,13 +366,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
memcpy:
r = ttm_bo_move_memcpy(bo, ctx, new_mem);
if (r) {
- return r;
+ goto fail;
}
}
/* update statistics */
atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
return 0;
+fail:
+ swap(*new_mem, bo->mem);
+ radeon_bo_move_notify(bo, false, new_mem);
+ swap(*new_mem, bo->mem);
+ return r;
}
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
@@ -374,6 +396,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
mem->bus.offset = (mem->start << PAGE_SHIFT) +
rdev->mc.agp_base;
mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
+ mem->bus.caching = ttm_write_combined;
}
#endif
break;
@@ -384,17 +407,13 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
return -EINVAL;
mem->bus.offset += rdev->mc.aper_base;
mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_write_combined;
#ifdef __alpha__
/*
* Alpha: use bus.addr to hold the ioremap() return,
* so we can modify bus.base below.
*/
- if (mem->placement & TTM_PL_FLAG_WC)
- mem->bus.addr =
- ioremap_wc(mem->bus.offset, bus_size);
- else
- mem->bus.addr =
- ioremap(mem->bus.offset, bus_size);
+ mem->bus.addr = ioremap_wc(mem->bus.offset, bus_size);
if (!mem->bus.addr)
return -ENOMEM;
@@ -418,7 +437,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
* TTM backend functions.
*/
struct radeon_ttm_tt {
- struct ttm_dma_tt ttm;
+ struct ttm_tt ttm;
u64 offset;
uint64_t userptr;
@@ -545,15 +564,15 @@ static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev,
gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
if (!ttm->num_pages) {
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
}
- if (ttm->caching_state == tt_cached)
+ if (ttm->caching == ttm_cached)
flags |= RADEON_GART_PAGE_SNOOP;
r = radeon_gart_bind(rdev, gtt->offset, ttm->num_pages,
ttm->pages, gtt->ttm.dma_address, flags);
if (r) {
- DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+ DRM_ERROR("failed to bind %u pages at 0x%08X\n",
ttm->num_pages, (unsigned)gtt->offset);
return r;
}
@@ -583,7 +602,7 @@ static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt
radeon_ttm_backend_unbind(bdev, ttm);
ttm_tt_destroy_common(bdev, ttm);
- ttm_dma_tt_fini(&gtt->ttm);
+ ttm_tt_fini(&gtt->ttm);
kfree(gtt);
}
@@ -592,6 +611,10 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
{
struct radeon_device *rdev;
struct radeon_ttm_tt *gtt;
+ enum ttm_caching caching;
+ struct radeon_bo *rbo;
+
+ rbo = container_of(bo, struct radeon_bo, tbo);
rdev = radeon_get_rdev(bo->bdev);
#if IS_ENABLED(CONFIG_AGP)
@@ -605,11 +628,19 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
if (gtt == NULL) {
return NULL;
}
- if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
+
+ if (rbo->flags & RADEON_GEM_GTT_UC)
+ caching = ttm_uncached;
+ else if (rbo->flags & RADEON_GEM_GTT_WC)
+ caching = ttm_write_combined;
+ else
+ caching = ttm_cached;
+
+ if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags, caching)) {
kfree(gtt);
return NULL;
}
- return &gtt->ttm.ttm;
+ return &gtt->ttm;
}
static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
@@ -622,7 +653,7 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
if (!ttm)
return NULL;
- return container_of(ttm, struct radeon_ttm_tt, ttm.ttm);
+ return container_of(ttm, struct radeon_ttm_tt, ttm);
}
static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
@@ -639,14 +670,12 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
return -ENOMEM;
ttm->page_flags |= TTM_PAGE_FLAG_SG;
- ttm_tt_set_populated(ttm);
return 0;
}
if (slave && ttm->sg) {
drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
gtt->ttm.dma_address, ttm->num_pages);
- ttm_tt_set_populated(ttm);
return 0;
}
@@ -793,19 +822,22 @@ bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev,
return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}
+static void
+radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+ radeon_bo_move_notify(bo, false, NULL);
+}
+
static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_create = &radeon_ttm_tt_create,
.ttm_tt_populate = &radeon_ttm_tt_populate,
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
- .ttm_tt_bind = &radeon_ttm_tt_bind,
- .ttm_tt_unbind = &radeon_ttm_tt_unbind,
.ttm_tt_destroy = &radeon_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &radeon_evict_flags,
.move = &radeon_bo_move,
.verify_access = &radeon_verify_access,
- .move_notify = &radeon_bo_move_notify,
- .fault_reserve_notify = &radeon_bo_fault_reserve_notify,
+ .delete_mem_notify = &radeon_bo_delete_mem_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
};
@@ -906,17 +938,29 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
{
- struct ttm_buffer_object *bo;
- struct radeon_device *rdev;
+ struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
+ struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
vm_fault_t ret;
- bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
- if (bo == NULL)
- return VM_FAULT_NOPAGE;
-
- rdev = radeon_get_rdev(bo->bdev);
down_read(&rdev->pm.mclk_lock);
- ret = ttm_bo_vm_fault(vmf);
+
+ ret = ttm_bo_vm_reserve(bo, vmf);
+ if (ret)
+ goto unlock_mclk;
+
+ ret = radeon_bo_fault_reserve_notify(bo);
+ if (ret)
+ goto unlock_resv;
+
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT, 1);
+ if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
+ goto unlock_mclk;
+
+unlock_resv:
+ dma_resv_unlock(bo->base.resv);
+
+unlock_mclk:
up_read(&rdev->pm.mclk_lock);
return ret;
}
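With fault_reserve_notify() gone from ttm_bo_driver, radeon open-codes the fault path so its replacement hook runs between reservation and the actual fault, all under the read side of mclk_lock. Stripped of the radeon-specific locking, the split-helper pattern is roughly:

static vm_fault_t example_ttm_fault(struct vm_fault *vmf)
{
        struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
        vm_fault_t ret;

        ret = ttm_bo_vm_reserve(bo, vmf);
        if (ret)
                return ret;

        /* driver-specific work runs here, with the BO reserved */

        ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
                                       TTM_BO_VM_NUM_PREFAULT, 1);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;     /* reservation already dropped by TTM */

        dma_resv_unlock(bo->base.resv);
        return ret;
}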
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index fe86a3e67757..4c360a255849 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -710,7 +710,7 @@ static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
}
static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
@@ -748,8 +748,10 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(old_state);
struct rcar_du_device *rcdu = rcrtc->dev;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 0f3eb392fe39..b7654f5e4225 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -212,15 +212,10 @@ static const struct file_operations rockchip_drm_driver_fops = {
static struct drm_driver rockchip_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .gem_free_object_unlocked = rockchip_gem_free_object,
.dumb_create = rockchip_gem_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table,
.gem_prime_import_sg_table = rockchip_gem_prime_import_sg_table,
- .gem_prime_vmap = rockchip_gem_prime_vmap,
- .gem_prime_vunmap = rockchip_gem_prime_vunmap,
.gem_prime_mmap = rockchip_gem_mmap_buf,
.fops = &rockchip_drm_driver_fops,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 62e5d0970525..7d5ebb10323b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -10,6 +10,7 @@
#include <drm/drm.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_vma_manager.h>
@@ -295,6 +296,14 @@ static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj)
kfree(rk_obj);
}
+static const struct drm_gem_object_funcs rockchip_gem_object_funcs = {
+ .free = rockchip_gem_free_object,
+ .get_sg_table = rockchip_gem_prime_get_sg_table,
+ .vmap = rockchip_gem_prime_vmap,
+ .vunmap = rockchip_gem_prime_vunmap,
+ .vm_ops = &drm_gem_cma_vm_ops,
+};
+
static struct rockchip_gem_object *
rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size)
{
@@ -309,6 +318,8 @@ static struct rockchip_gem_object *
obj = &rk_obj->base;
+ obj->funcs = &rockchip_gem_object_funcs;
+
drm_gem_object_init(drm, obj, size);
return rk_obj;
@@ -337,7 +348,7 @@ err_free_rk_obj:
}
/*
- * rockchip_gem_free_object - (struct drm_driver)->gem_free_object_unlocked
+ * rockchip_gem_free_object - (struct drm_gem_object_funcs)->free
* callback function
*/
void rockchip_gem_free_object(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index c80f7d9fd13f..47835715b44b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -693,7 +693,7 @@ static void rockchip_drm_set_win_enabled(struct drm_crtc *crtc, bool enabled)
}
static void vop_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct vop *vop = to_vop(crtc);
@@ -1260,8 +1260,10 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
}
static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct vop *vop = to_vop(crtc);
const struct vop_data *vop_data = vop->data;
struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc->state);
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 146380118962..f8ec277a6aa8 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -73,6 +73,9 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
init_completion(&entity->entity_idle);
+ /* We start in an idle state. */
+ complete(&entity->entity_idle);
+
spin_lock_init(&entity->rq_lock);
spsc_queue_init(&entity->job_queue);
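Completing entity_idle at init time matters for teardown: drm_sched_entity_fini() waits on this completion before removing the entity, and an entity that never had a job scheduled would otherwise leave that wait hanging forever. In driver terms (a hypothetical sketch):

struct drm_sched_entity entity;

drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
                      &sched, 1, NULL);

/* ... no job is ever pushed to the entity ... */

drm_sched_entity_destroy(&entity);      /* previously could block forever on
                                         * entity_idle; now returns, because
                                         * the entity starts out marked idle */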
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 6f37c104c46f..5726746f6d18 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -23,7 +23,7 @@
#include "sti_vtg.h"
static void sti_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
@@ -35,7 +35,7 @@ static void sti_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void sti_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct sti_mixer *mixer = to_sti_mixer(crtc);
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 6e28f707092f..e9af92d4a74b 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -420,7 +420,7 @@ static void ltdc_crtc_update_clut(struct drm_crtc *crtc)
}
static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_device *ddev = crtc->dev;
@@ -442,7 +442,7 @@ static void ltdc_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void ltdc_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct ltdc_device *ldev = crtc_to_ltdc(crtc);
struct drm_device *ddev = crtc->dev;
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 3a153648b369..999deb64bd70 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -101,7 +101,7 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
}
static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
@@ -122,7 +122,7 @@ static void sun4i_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void sun4i_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_encoder *encoder = sun4i_crtc_get_encoder(crtc);
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 424ad60b4f38..52acc2f8f798 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1742,7 +1742,7 @@ static int tegra_dc_wait_idle(struct tegra_dc *dc, unsigned long timeout)
}
static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
@@ -1799,10 +1799,10 @@ static void tegra_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- struct tegra_dc_state *state = to_dc_state(crtc->state);
+ struct tegra_dc_state *crtc_state = to_dc_state(crtc->state);
struct tegra_dc *dc = to_tegra_dc(crtc);
u32 value;
int err;
@@ -1882,7 +1882,7 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
/* apply PLL and pixel clock changes */
- tegra_dc_commit_state(dc, state);
+ tegra_dc_commit_state(dc, crtc_state);
/* program display mode */
tegra_dc_set_timings(dc, mode);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index ba9d1c3e7cac..f0f581cd345e 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -858,12 +858,8 @@ static struct drm_driver tegra_drm_driver = {
.debugfs_init = tegra_debugfs_init,
#endif
- .gem_free_object_unlocked = tegra_bo_free_object,
- .gem_vm_ops = &tegra_bo_vm_ops,
-
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = tegra_gem_prime_export,
.gem_prime_import = tegra_gem_prime_import,
.dumb_create = tegra_bo_dumb_create,
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index a2bac20ff19d..26af8daa9a16 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -132,24 +132,29 @@ static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt)
static void *tegra_bo_mmap(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ struct dma_buf_map map;
+ int ret;
- if (obj->vaddr)
+ if (obj->vaddr) {
return obj->vaddr;
- else if (obj->gem.import_attach)
- return dma_buf_vmap(obj->gem.import_attach->dmabuf);
- else
+ } else if (obj->gem.import_attach) {
+ ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
+ return ret ? NULL : map.vaddr;
+ } else {
return vmap(obj->pages, obj->num_pages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
+ }
}
static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(addr);
if (obj->vaddr)
return;
else if (obj->gem.import_attach)
- dma_buf_vunmap(obj->gem.import_attach->dmabuf, addr);
+ dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
else
vunmap(addr);
}
@@ -230,6 +235,12 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
return 0;
}
+static const struct drm_gem_object_funcs tegra_gem_object_funcs = {
+ .free = tegra_bo_free_object,
+ .export = tegra_gem_prime_export,
+ .vm_ops = &tegra_bo_vm_ops,
+};
+
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
size_t size)
{
@@ -240,6 +251,8 @@ static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
if (!bo)
return ERR_PTR(-ENOMEM);
+ bo->gem.funcs = &tegra_gem_object_funcs;
+
host1x_bo_init(&bo->base, &tegra_bo_ops);
size = round_up(size, PAGE_SIZE);
@@ -634,15 +647,17 @@ static int tegra_gem_prime_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
return __tegra_gem_mmap(gem, vma);
}
-static void *tegra_gem_prime_vmap(struct dma_buf *buf)
+static int tegra_gem_prime_vmap(struct dma_buf *buf, struct dma_buf_map *map)
{
struct drm_gem_object *gem = buf->priv;
struct tegra_bo *bo = to_tegra_bo(gem);
- return bo->vaddr;
+ dma_buf_map_set_vaddr(map, bo->vaddr);
+
+ return 0;
}
-static void tegra_gem_prime_vunmap(struct dma_buf *buf, void *vaddr)
+static void tegra_gem_prime_vunmap(struct dma_buf *buf, struct dma_buf_map *map)
{
}
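tegra is converted to the struct dma_buf_map interface from the cross-subsystem changes: dma_buf_vmap() now returns its mapping through a dma_buf_map, which records whether the address is I/O memory, and dma_buf_vunmap() takes the same struct back. Typical importer usage, sketched against the API of this cycle:

#include <linux/dma-buf-map.h>

struct dma_buf_map map;
int ret;

ret = dma_buf_vmap(dmabuf, &map);
if (ret)
        return NULL;

/* system memory: use map.vaddr; if map.is_iomem is set, the mapping
 * must be accessed through map.vaddr_iomem instead */

dma_buf_vunmap(dmabuf, &map);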
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 3c5744a91d4a..848b9c7b553d 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -212,8 +212,10 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
}
static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
struct tidss_device *tidss = to_tidss(ddev);
@@ -255,7 +257,7 @@ static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 43e72d0b2d84..35067ae674ea 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -10,6 +10,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include "tidss_crtc.h"
#include "tidss_dispc.h"
@@ -150,6 +151,7 @@ static void drm_plane_destroy(struct drm_plane *plane)
}
static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = {
+ .prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = tidss_plane_atomic_check,
.atomic_update = tidss_plane_atomic_update,
.atomic_disable = tidss_plane_atomic_disable,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 518220bd092a..da2ab2aa3577 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -147,12 +147,9 @@ static void tilcdc_crtc_enable_irqs(struct drm_device *dev)
tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
LCDC_V1_SYNC_LOST_INT_ENA | LCDC_V1_FRAME_DONE_INT_ENA |
LCDC_V1_UNDERFLOW_INT_ENA);
- tilcdc_set(dev, LCDC_DMA_CTRL_REG,
- LCDC_V1_END_OF_FRAME_INT_ENA);
} else {
tilcdc_write(dev, LCDC_INT_ENABLE_SET_REG,
LCDC_V2_UNDERFLOW_INT_ENA |
- LCDC_V2_END_OF_FRAME0_INT_ENA |
LCDC_FRAME_DONE | LCDC_SYNC_LOST);
}
}
@@ -484,7 +481,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
}
static void tilcdc_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
tilcdc_crtc_enable(crtc);
}
@@ -532,7 +529,7 @@ static void tilcdc_crtc_disable(struct drm_crtc *crtc)
}
static void tilcdc_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
tilcdc_crtc_disable(crtc);
}
@@ -678,11 +675,44 @@ static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
static int tilcdc_crtc_enable_vblank(struct drm_crtc *crtc)
{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+
+ tilcdc_clear_irqstatus(dev, LCDC_END_OF_FRAME0);
+
+ if (priv->rev == 1)
+ tilcdc_set(dev, LCDC_DMA_CTRL_REG,
+ LCDC_V1_END_OF_FRAME_INT_ENA);
+ else
+ tilcdc_set(dev, LCDC_INT_ENABLE_SET_REG,
+ LCDC_V2_END_OF_FRAME0_INT_ENA);
+
+ spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+
return 0;
}
static void tilcdc_crtc_disable_vblank(struct drm_crtc *crtc)
{
+ struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+
+ if (priv->rev == 1)
+ tilcdc_clear(dev, LCDC_DMA_CTRL_REG,
+ LCDC_V1_END_OF_FRAME_INT_ENA);
+ else
+ tilcdc_clear(dev, LCDC_INT_ENABLE_SET_REG,
+ LCDC_V2_END_OF_FRAME0_INT_ENA);
+
+ spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
}
static void tilcdc_crtc_reset(struct drm_crtc *crtc)
@@ -724,20 +754,6 @@ static const struct drm_crtc_funcs tilcdc_crtc_funcs = {
.disable_vblank = tilcdc_crtc_disable_vblank,
};
-int tilcdc_crtc_max_width(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct tilcdc_drm_private *priv = dev->dev_private;
- int max_width = 0;
-
- if (priv->rev == 1)
- max_width = 1024;
- else if (priv->rev == 2)
- max_width = 2048;
-
- return max_width;
-}
-
static enum drm_mode_status
tilcdc_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
@@ -750,7 +766,7 @@ tilcdc_crtc_mode_valid(struct drm_crtc *crtc,
* check to see if the width is within the range that
* the LCD Controller physically supports
*/
- if (mode->hdisplay > tilcdc_crtc_max_width(crtc))
+ if (mode->hdisplay > priv->max_width)
return MODE_VIRTUAL_X;
/* width must be multiple of 16 */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 4f5fc3e87383..c5f82e693f1a 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -105,7 +105,7 @@ static void modeset_init(struct drm_device *dev)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
- dev->mode_config.max_width = tilcdc_crtc_max_width(priv->crtc);
+ dev->mode_config.max_width = priv->max_width;
dev->mode_config.max_height = 2048;
dev->mode_config.funcs = &mode_config_funcs;
}
@@ -218,22 +218,6 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
goto init_failed;
}
- if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
- priv->max_bandwidth = TILCDC_DEFAULT_MAX_BANDWIDTH;
-
- DBG("Maximum Bandwidth Value %d", priv->max_bandwidth);
-
- if (of_property_read_u32(node, "max-width", &priv->max_width))
- priv->max_width = TILCDC_DEFAULT_MAX_WIDTH;
-
- DBG("Maximum Horizontal Pixel Width Value %dpixels", priv->max_width);
-
- if (of_property_read_u32(node, "max-pixelclock",
- &priv->max_pixelclock))
- priv->max_pixelclock = TILCDC_DEFAULT_MAX_PIXELCLOCK;
-
- DBG("Maximum Pixel Clock Value %dKHz", priv->max_pixelclock);
-
pm_runtime_enable(dev);
/* Determine LCD IP Version */
@@ -287,6 +271,26 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
}
}
+ if (of_property_read_u32(node, "max-bandwidth", &priv->max_bandwidth))
+ priv->max_bandwidth = TILCDC_DEFAULT_MAX_BANDWIDTH;
+
+ DBG("Maximum Bandwidth Value %d", priv->max_bandwidth);
+
+ if (of_property_read_u32(node, "max-width", &priv->max_width)) {
+ if (priv->rev == 1)
+ priv->max_width = TILCDC_DEFAULT_MAX_WIDTH_V1;
+ else
+ priv->max_width = TILCDC_DEFAULT_MAX_WIDTH_V2;
+ }
+
+ DBG("Maximum Horizontal Pixel Width Value %dpixels", priv->max_width);
+
+ if (of_property_read_u32(node, "max-pixelclock",
+ &priv->max_pixelclock))
+ priv->max_pixelclock = TILCDC_DEFAULT_MAX_PIXELCLOCK;
+
+ DBG("Maximum Pixel Clock Value %dKHz", priv->max_pixelclock);
+
ret = tilcdc_crtc_create(ddev);
if (ret < 0) {
dev_err(dev, "failed to create crtc\n");
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index 18815e75ca4f..d29806ca8817 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -28,8 +28,10 @@ struct drm_plane;
/* Defaulting to pixel clock defined on AM335x */
#define TILCDC_DEFAULT_MAX_PIXELCLOCK 126000
-/* Defaulting to max width as defined on AM335x */
-#define TILCDC_DEFAULT_MAX_WIDTH 2048
+/* Maximum display width for LCDC V1 */
+#define TILCDC_DEFAULT_MAX_WIDTH_V1 1024
+/* ... and for LCDC V2 found on AM335x: */
+#define TILCDC_DEFAULT_MAX_WIDTH_V2 2048
/*
* This may need some tweaking, but want to allow at least 1280x1024@60
* with optimized DDR & EMIF settings tweaked 1920x1080@24 appears to
@@ -158,7 +160,6 @@ void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
const struct tilcdc_panel_info *info);
void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
bool simulate_vesa_sync);
-int tilcdc_crtc_max_width(struct drm_crtc *crtc);
void tilcdc_crtc_shutdown(struct drm_crtc *crtc);
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
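/*
 * Editor's note: a hypothetical board example, not part of this patch.
 * The optional "max-width" property from the tilcdc DT binding can still
 * override the new revision-dependent default, e.g.:
 *
 *	&lcdc {
 *		max-width = <1024>;
 *	};
 */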
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index a98fd795b752..4f76c9287159 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -54,7 +54,7 @@ int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
- int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+ int ret, cached = ttm->caching == ttm_cached;
unsigned i;
if (agp_be->mem)
@@ -136,7 +136,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo,
agp_be->mem = NULL;
agp_be->bridge = bridge;
- if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) {
+ if (ttm_tt_init(&agp_be->ttm, bo, page_flags, ttm_write_combined)) {
kfree(agp_be);
return NULL;
}
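/*
 * Editor's note: a minimal sketch, not part of this patch. Caching is now
 * fixed when the TT is created instead of being changed at placement time,
 * so a driver's ttm_tt_create() hook picks it once (sketch_tt_create is a
 * hypothetical name):
 */
static struct ttm_tt *sketch_tt_create(struct ttm_buffer_object *bo,
				       uint32_t page_flags)
{
	struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;

	/* choose ttm_cached, ttm_write_combined or ttm_uncached up front */
	if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
		kfree(tt);
		return NULL;
	}
	return tt;
}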
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index eb4b7df02ca0..f51b5e20fa17 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -115,10 +115,7 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_resource_manager *man;
- if (!list_empty(&bo->lru))
- return;
-
- if (mem->placement & TTM_PL_FLAG_NO_EVICT)
+ if (!list_empty(&bo->lru) || bo->pin_count)
return;
man = ttm_manager_type(bdev, mem->mem_type);
@@ -165,7 +162,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
ttm_bo_del_from_lru(bo);
ttm_bo_add_mem_to_lru(bo, &bo->mem);
- if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+ if (bulk && !bo->pin_count) {
switch (bo->mem.mem_type) {
case TTM_PL_TT:
ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
@@ -255,49 +252,17 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
if (ret)
goto out_err;
- ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
- if (ret)
- goto out_err;
-
if (mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_tt_populate(bdev, bo->ttm, ctx);
+ ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
if (ret)
goto out_err;
-
- ret = ttm_bo_tt_bind(bo, mem);
- if (ret)
- goto out_err;
- }
-
- if (bo->mem.mem_type == TTM_PL_SYSTEM) {
- if (bdev->driver->move_notify)
- bdev->driver->move_notify(bo, evict, mem);
- bo->mem = *mem;
- goto moved;
}
}
- if (bdev->driver->move_notify)
- bdev->driver->move_notify(bo, evict, mem);
-
- if (old_man->use_tt && new_man->use_tt)
- ret = ttm_bo_move_ttm(bo, ctx, mem);
- else if (bdev->driver->move)
- ret = bdev->driver->move(bo, evict, ctx, mem);
- else
- ret = ttm_bo_move_memcpy(bo, ctx, mem);
-
- if (ret) {
- if (bdev->driver->move_notify) {
- swap(*mem, bo->mem);
- bdev->driver->move_notify(bo, false, mem);
- swap(*mem, bo->mem);
- }
-
+ ret = bdev->driver->move(bo, evict, ctx, mem);
+ if (ret)
goto out_err;
- }
-moved:
ctx->bytes_moved += bo->num_pages << PAGE_SHIFT;
return 0;
@@ -319,8 +284,8 @@ out_err:
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
- if (bo->bdev->driver->move_notify)
- bo->bdev->driver->move_notify(bo, false, NULL);
+ if (bo->bdev->driver->delete_mem_notify)
+ bo->bdev->driver->delete_mem_notify(bo);
ttm_bo_tt_destroy(bo);
ttm_resource_free(bo, &bo->mem);
@@ -540,12 +505,12 @@ static void ttm_bo_release(struct kref *kref)
spin_lock(&ttm_bo_glob.lru_lock);
/*
- * Make NO_EVICT bos immediately available to
+ * Make pinned bos immediately available to
* shrinkers, now that they are queued for
* destruction.
*/
- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
- bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+ if (bo->pin_count) {
+ bo->pin_count = 0;
ttm_bo_del_from_lru(bo);
ttm_bo_add_mem_to_lru(bo, &bo->mem);
}
@@ -860,35 +825,11 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
}
-static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man,
- uint32_t cur_placement,
- uint32_t proposed_placement)
-{
- uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
- uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
-
- /**
- * Keep current caching if possible.
- */
-
- if ((cur_placement & caching) != 0)
- result |= (cur_placement & caching);
- else if ((TTM_PL_FLAG_CACHED & caching) != 0)
- result |= TTM_PL_FLAG_CACHED;
- else if ((TTM_PL_FLAG_WC & caching) != 0)
- result |= TTM_PL_FLAG_WC;
- else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
- result |= TTM_PL_FLAG_UNCACHED;
-
- return result;
-}
-
/**
* ttm_bo_mem_placement - check if placement is compatible
* @bo: BO to find memory for
* @place: where to search
* @mem: the memory object to fill in
- * @ctx: operation context
*
* Check if placement is compatible and fill in mem structure.
* Returns -EBUSY if the placement won't work, or a negative error code.
@@ -896,23 +837,17 @@ static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man,
*/
static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
const struct ttm_place *place,
- struct ttm_resource *mem,
- struct ttm_operation_ctx *ctx)
+ struct ttm_resource *mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_resource_manager *man;
- uint32_t cur_flags = 0;
man = ttm_manager_type(bdev, place->mem_type);
if (!man || !ttm_resource_manager_used(man))
return -EBUSY;
- cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
- place->flags);
- cur_flags |= place->flags & ~TTM_PL_MASK_CACHING;
-
mem->mem_type = place->mem_type;
- mem->placement = cur_flags;
+ mem->placement = place->flags;
spin_lock(&ttm_bo_glob.lru_lock);
ttm_bo_del_from_lru(bo);
@@ -947,7 +882,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
const struct ttm_place *place = &placement->placement[i];
struct ttm_resource_manager *man;
- ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+ ret = ttm_bo_mem_placement(bo, place, mem);
if (ret)
continue;
@@ -973,7 +908,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
for (i = 0; i < placement->num_busy_placement; ++i) {
const struct ttm_place *place = &placement->busy_placement[i];
- ret = ttm_bo_mem_placement(bo, place, mem, ctx);
+ ret = ttm_bo_mem_placement(bo, place, mem);
if (ret)
continue;
@@ -1045,8 +980,7 @@ static bool ttm_bo_places_compat(const struct ttm_place *places,
continue;
*new_flags = heap->flags;
- if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
- (mem->mem_type == heap->mem_type) &&
+ if ((mem->mem_type == heap->mem_type) &&
(!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
(mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
return true;
@@ -1100,9 +1034,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
ret = ttm_bo_move_buffer(bo, placement, ctx);
if (ret)
return ret;
- } else {
- bo->mem.placement &= TTM_PL_MASK_CACHING;
- bo->mem.placement |= new_flags & ~TTM_PL_MASK_CACHING;
}
/*
* We might need to add a TTM.
@@ -1170,8 +1101,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->mem.bus.offset = 0;
bo->mem.bus.addr = NULL;
bo->moving = NULL;
- bo->mem.placement = TTM_PL_FLAG_CACHED;
+ bo->mem.placement = 0;
bo->acc_size = acc_size;
+ bo->pin_count = 0;
bo->sg = sg;
if (resv) {
bo->base.resv = resv;
@@ -1251,19 +1183,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
}
EXPORT_SYMBOL(ttm_bo_init);
-static size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
- unsigned long bo_size,
- unsigned struct_size)
-{
- unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
- size_t size = 0;
-
- size += ttm_round_pot(struct_size);
- size += ttm_round_pot(npages * sizeof(void *));
- size += ttm_round_pot(sizeof(struct ttm_tt));
- return size;
-}
-
size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
unsigned long bo_size,
unsigned struct_size)
@@ -1273,56 +1192,11 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
size += ttm_round_pot(struct_size);
size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
- size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+ size += ttm_round_pot(sizeof(struct ttm_tt));
return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);
-int ttm_bo_create(struct ttm_bo_device *bdev,
- unsigned long size,
- enum ttm_bo_type type,
- struct ttm_placement *placement,
- uint32_t page_alignment,
- bool interruptible,
- struct ttm_buffer_object **p_bo)
-{
- struct ttm_buffer_object *bo;
- size_t acc_size;
- int ret;
-
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (unlikely(bo == NULL))
- return -ENOMEM;
-
- acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
- ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
- interruptible, acc_size,
- NULL, NULL, NULL);
- if (likely(ret == 0))
- *p_bo = bo;
-
- return ret;
-}
-EXPORT_SYMBOL(ttm_bo_create);
-
-int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
-{
- struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type);
-
- if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
- pr_err("Illegal memory manager memory type %u\n", mem_type);
- return -EINVAL;
- }
-
- if (!man) {
- pr_err("Memory type %u has not been initialized\n", mem_type);
- return 0;
- }
-
- return ttm_resource_manager_force_list_clean(bdev, man);
-}
-EXPORT_SYMBOL(ttm_bo_evict_mm);
-
static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
struct ttm_bo_global *glob =
@@ -1506,8 +1380,9 @@ EXPORT_SYMBOL(ttm_bo_wait);
* A buffer object shrink method that tries to swap out the first
* buffer object on the bo_global::swap_lru list.
*/
-int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
+int ttm_bo_swapout(struct ttm_operation_ctx *ctx)
{
+ struct ttm_bo_global *glob = &ttm_bo_glob;
struct ttm_buffer_object *bo;
int ret = -EBUSY;
bool locked;
@@ -1551,14 +1426,13 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
* Move to system cached
*/
- if (bo->mem.mem_type != TTM_PL_SYSTEM ||
- bo->ttm->caching_state != tt_cached) {
+ if (bo->mem.mem_type != TTM_PL_SYSTEM) {
struct ttm_operation_ctx ctx = { false, false };
struct ttm_resource evict_mem;
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
- evict_mem.placement = TTM_PL_FLAG_CACHED;
+ evict_mem.placement = 0;
evict_mem.mem_type = TTM_PL_SYSTEM;
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx);
@@ -1584,7 +1458,7 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
if (bo->bdev->driver->swap_notify)
bo->bdev->driver->swap_notify(bo);
- ret = ttm_tt_swapout(bo->bdev, bo->ttm, bo->persistent_swap_storage);
+ ret = ttm_tt_swapout(bo->bdev, bo->ttm);
out:
/**
@@ -1599,17 +1473,6 @@ out:
}
EXPORT_SYMBOL(ttm_bo_swapout);
-void ttm_bo_swapout_all(void)
-{
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
-
- while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
-}
-EXPORT_SYMBOL(ttm_bo_swapout_all);
-
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
{
if (bo->ttm == NULL)
@@ -1619,12 +1482,3 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
bo->ttm = NULL;
}
-int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem)
-{
- return bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem);
-}
-
-void ttm_bo_tt_unbind(struct ttm_buffer_object *bo)
-{
- bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm);
-}
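/*
 * Editor's note: a sketch under the new API, not part of this patch.
 * TTM_PL_FLAG_NO_EVICT is gone; a buffer is kept resident through the
 * explicit pin count instead, with the BO reserved by the caller
 * (the sketch_* names are hypothetical):
 */
static void sketch_pin_for_scanout(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	ttm_bo_pin(bo);		/* raises bo->pin_count; eviction and swapout skip pinned BOs */
}

static void sketch_unpin(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	ttm_bo_unpin(bo);
}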
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fb2a25f8408f..ecb54415d1ca 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -45,53 +45,6 @@ struct ttm_transfer_obj {
struct ttm_buffer_object *bo;
};
-void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
-{
- ttm_resource_free(bo, &bo->mem);
-}
-
-int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem)
-{
- struct ttm_tt *ttm = bo->ttm;
- struct ttm_resource *old_mem = &bo->mem;
- int ret;
-
- if (old_mem->mem_type != TTM_PL_SYSTEM) {
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
-
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- pr_err("Failed to expire sync object before unbinding TTM\n");
- return ret;
- }
-
- ttm_bo_tt_unbind(bo);
- ttm_bo_free_old_node(bo);
- old_mem->mem_type = TTM_PL_SYSTEM;
- }
-
- ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
- if (unlikely(ret != 0))
- return ret;
-
- if (new_mem->mem_type != TTM_PL_SYSTEM) {
-
- ret = ttm_tt_populate(bo->bdev, ttm, ctx);
- if (unlikely(ret != 0))
- return ret;
-
- ret = ttm_bo_tt_bind(bo, new_mem);
- if (unlikely(ret != 0))
- return ret;
- }
-
- ttm_bo_assign_mem(bo, new_mem);
- return 0;
-}
-EXPORT_SYMBOL(ttm_bo_move_ttm);
-
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
struct ttm_resource *mem)
{
@@ -135,7 +88,7 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
} else {
size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
- if (mem->placement & TTM_PL_FLAG_WC)
+ if (mem->bus.caching == ttm_write_combined)
addr = ioremap_wc(mem->bus.offset, bus_size);
else
addr = ioremap(mem->bus.offset, bus_size);
@@ -227,11 +180,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
void *new_iomap;
int ret;
unsigned long i;
- unsigned long page;
- unsigned long add = 0;
- int dir;
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+ ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
return ret;
@@ -267,29 +217,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
goto out1;
}
- add = 0;
- dir = 1;
-
- if ((old_mem->mem_type == new_mem->mem_type) &&
- (new_mem->start < old_mem->start + old_mem->size)) {
- dir = -1;
- add = new_mem->num_pages - 1;
- }
-
for (i = 0; i < new_mem->num_pages; ++i) {
- page = i * dir + add;
if (old_iomap == NULL) {
- pgprot_t prot = ttm_io_prot(old_mem->placement,
- PAGE_KERNEL);
- ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+ pgprot_t prot = ttm_io_prot(bo, old_mem, PAGE_KERNEL);
+ ret = ttm_copy_ttm_io_page(ttm, new_iomap, i,
prot);
} else if (new_iomap == NULL) {
- pgprot_t prot = ttm_io_prot(new_mem->placement,
- PAGE_KERNEL);
- ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+ pgprot_t prot = ttm_io_prot(bo, new_mem, PAGE_KERNEL);
+ ret = ttm_copy_io_ttm_page(ttm, old_iomap, i,
prot);
} else {
- ret = ttm_copy_io_page(new_iomap, old_iomap, page);
+ ret = ttm_copy_io_page(new_iomap, old_iomap, i);
}
if (ret)
goto out1;
@@ -352,7 +290,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
return -ENOMEM;
fbo->base = *bo;
- fbo->base.mem.placement |= TTM_PL_FLAG_NO_EVICT;
ttm_bo_get(bo);
fbo->bo = bo;
@@ -372,6 +309,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
+ fbo->base.pin_count = 1;
if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
@@ -384,21 +322,28 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
return 0;
}
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
+pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
+ pgprot_t tmp)
{
+ struct ttm_resource_manager *man;
+ enum ttm_caching caching;
+
+ man = ttm_manager_type(bo->bdev, res->mem_type);
+ caching = man->use_tt ? bo->ttm->caching : res->bus.caching;
+
/* Cached mappings need no adjustment */
- if (caching_flags & TTM_PL_FLAG_CACHED)
+ if (caching == ttm_cached)
return tmp;
#if defined(__i386__) || defined(__x86_64__)
- if (caching_flags & TTM_PL_FLAG_WC)
+ if (caching == ttm_write_combined)
tmp = pgprot_writecombine(tmp);
else if (boot_cpu_data.x86 > 3)
tmp = pgprot_noncached(tmp);
#endif
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
defined(__powerpc__) || defined(__mips__)
- if (caching_flags & TTM_PL_FLAG_WC)
+ if (caching == ttm_write_combined)
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
@@ -422,7 +367,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
} else {
map->bo_kmap_type = ttm_bo_map_iomap;
- if (mem->placement & TTM_PL_FLAG_WC)
+ if (mem->bus.caching == ttm_write_combined)
map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
size);
else
@@ -452,7 +397,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
if (ret)
return ret;
- if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
+ if (num_pages == 1 && ttm->caching == ttm_cached) {
/*
* We're mapping a single page, and the desired
* page protection is consistent with the bo.
@@ -466,7 +411,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
*/
- prot = ttm_io_prot(mem->placement, PAGE_KERNEL);
+ prot = ttm_io_prot(bo, mem, PAGE_KERNEL);
map->bo_kmap_type = ttm_bo_map_vmap;
map->virtual = vmap(ttm->pages + start_page, num_pages,
0, prot);
@@ -536,7 +481,7 @@ static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
if (!dst_use_tt)
ttm_bo_tt_destroy(bo);
- ttm_bo_free_old_node(bo);
+ ttm_resource_free(bo, &bo->mem);
return 0;
}
@@ -597,7 +542,7 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
}
spin_unlock(&from->move_lock);
- ttm_bo_free_old_node(bo);
+ ttm_resource_free(bo, &bo->mem);
dma_fence_put(bo->moving);
bo->moving = dma_fence_get(fence);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 98a006fc30a5..eeaca5d1efe3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -157,6 +157,15 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
return VM_FAULT_NOPAGE;
}
+ /*
+ * Refuse to fault imported pages. This should be handled
+ * (if at all) by redirecting mmap to the exporter.
+ */
+ if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ dma_resv_unlock(bo->base.resv);
+ return VM_FAULT_SIGBUS;
+ }
+
return 0;
}
EXPORT_SYMBOL(ttm_bo_vm_reserve);
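/*
 * Editor's note: moving the SG check into ttm_bo_vm_reserve() means every
 * driver fault handler built on this helper inherits it. A typical handler
 * is sketched below (hypothetical name, simplified error handling):
 */
static vm_fault_t sketch_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	vm_fault_t ret;

	/* takes bo->base.resv; now also refuses imported (SG) BOs */
	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* ... driver-specific fault handling ... */

	dma_resv_unlock(bo->base.resv);
	return VM_FAULT_NOPAGE;
}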
@@ -282,35 +291,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
unsigned long address = vmf->address;
/*
- * Refuse to fault imported pages. This should be handled
- * (if at all) by redirecting mmap to the exporter.
- */
- if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
- return VM_FAULT_SIGBUS;
-
- if (bdev->driver->fault_reserve_notify) {
- struct dma_fence *moving = dma_fence_get(bo->moving);
-
- err = bdev->driver->fault_reserve_notify(bo);
- switch (err) {
- case 0:
- break;
- case -EBUSY:
- case -ERESTARTSYS:
- dma_fence_put(moving);
- return VM_FAULT_NOPAGE;
- default:
- dma_fence_put(moving);
- return VM_FAULT_SIGBUS;
- }
-
- if (bo->moving != moving) {
- ttm_bo_move_to_lru_tail_unlocked(bo);
- }
- dma_fence_put(moving);
- }
-
- /*
* Wait for buffer data in transit, due to a pipelined
* move.
*/
@@ -330,7 +310,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
if (unlikely(page_offset >= bo->num_pages))
return VM_FAULT_SIGBUS;
- prot = ttm_io_prot(bo->mem.placement, prot);
+ prot = ttm_io_prot(bo, &bo->mem, prot);
if (!bo->mem.bus.is_iomem) {
struct ttm_operation_ctx ctx = {
.interruptible = false,
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 89d50f38c0f2..69cf622e79e5 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -275,7 +275,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
spin_unlock(&glob->lock);
- ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
+ ret = ttm_bo_swapout(ctx);
spin_lock(&glob->lock);
if (unlikely(ret != 0))
break;
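/*
 * Editor's note: ttm_bo_swapout() now always acts on the single global
 * ttm_bo_glob, so callers stop passing it in. The ttm_bo_swapout_all()
 * helper deleted in ttm_bo.c above amounted to this loop, which a caller
 * could now open-code as a sketch like:
 *
 *	struct ttm_operation_ctx ctx = { false, false };
 *
 *	while (ttm_bo_swapout(&ctx) == 0)
 *		;
 */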
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 14660f723f71..29e6c29ad60e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -47,7 +47,8 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_set_memory.h>
+
+#include "ttm_set_memory.h"
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 16
@@ -219,14 +220,14 @@ static struct ttm_pool_manager *_manager;
/**
* Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
- enum ttm_caching_state cstate)
+ enum ttm_caching cstate)
{
int pool_index;
- if (cstate == tt_cached)
+ if (cstate == ttm_cached)
return NULL;
- if (cstate == tt_wc)
+ if (cstate == ttm_write_combined)
pool_index = 0x0;
else
pool_index = 0x1;
@@ -440,17 +441,17 @@ static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
}
static int ttm_set_pages_caching(struct page **pages,
- enum ttm_caching_state cstate, unsigned cpages)
+ enum ttm_caching cstate, unsigned cpages)
{
int r = 0;
/* Set page caching */
switch (cstate) {
- case tt_uncached:
+ case ttm_uncached:
r = ttm_set_pages_array_uc(pages, cpages);
if (r)
pr_err("Failed to set %d pages to uc!\n", cpages);
break;
- case tt_wc:
+ case ttm_write_combined:
r = ttm_set_pages_array_wc(pages, cpages);
if (r)
pr_err("Failed to set %d pages to wc!\n", cpages);
@@ -466,11 +467,11 @@ static int ttm_set_pages_caching(struct page **pages,
* any pages that have changed their caching state already put them to the
* pool.
*/
-static void ttm_handle_caching_state_failure(struct list_head *pages,
- int ttm_flags, enum ttm_caching_state cstate,
- struct page **failed_pages, unsigned cpages)
+static void ttm_handle_caching_failure(struct page **failed_pages,
+ unsigned cpages)
{
unsigned i;
+
/* Failed pages have to be freed */
for (i = 0; i < cpages; ++i) {
list_del(&failed_pages[i]->lru);
@@ -485,7 +486,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
* pages returned in pages array.
*/
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
- int ttm_flags, enum ttm_caching_state cstate,
+ int ttm_flags, enum ttm_caching cstate,
unsigned count, unsigned order)
{
struct page **caching_array;
@@ -516,9 +517,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
r = ttm_set_pages_caching(caching_array,
cstate, cpages);
if (r)
- ttm_handle_caching_state_failure(pages,
- ttm_flags, cstate,
- caching_array, cpages);
+ ttm_handle_caching_failure(caching_array,
+ cpages);
}
r = -ENOMEM;
goto out;
@@ -541,9 +541,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
r = ttm_set_pages_caching(caching_array,
cstate, cpages);
if (r) {
- ttm_handle_caching_state_failure(pages,
- ttm_flags, cstate,
- caching_array, cpages);
+ ttm_handle_caching_failure(caching_array,
+ cpages);
goto out;
}
cpages = 0;
@@ -554,9 +553,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
if (cpages) {
r = ttm_set_pages_caching(caching_array, cstate, cpages);
if (r)
- ttm_handle_caching_state_failure(pages,
- ttm_flags, cstate,
- caching_array, cpages);
+ ttm_handle_caching_failure(caching_array, cpages);
}
out:
kfree(caching_array);
@@ -569,7 +566,7 @@ out:
* pages is small.
*/
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
- enum ttm_caching_state cstate,
+ enum ttm_caching cstate,
unsigned count, unsigned long *irq_flags)
{
struct page *p;
@@ -629,7 +626,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
struct list_head *pages,
int ttm_flags,
- enum ttm_caching_state cstate,
+ enum ttm_caching cstate,
unsigned count, unsigned order)
{
unsigned long irq_flags;
@@ -706,7 +703,7 @@ out:
/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
- enum ttm_caching_state cstate)
+ enum ttm_caching cstate)
{
struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -824,7 +821,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
* cached pages.
*/
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
- enum ttm_caching_state cstate)
+ enum ttm_caching cstate)
{
struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -1043,8 +1040,7 @@ ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
put_pages:
ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
- ttm->caching_state);
- ttm_tt_set_unpopulated(ttm);
+ ttm->caching);
}
int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
@@ -1060,7 +1056,7 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
return -ENOMEM;
ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
- ttm->caching_state);
+ ttm->caching);
if (unlikely(ret != 0)) {
ttm_pool_unpopulate_helper(ttm, 0);
return ret;
@@ -1075,15 +1071,6 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
}
}
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0)) {
- ttm_pool_unpopulate(ttm);
- return ret;
- }
- }
-
- ttm_tt_set_populated(ttm);
return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);
@@ -1094,28 +1081,28 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt,
struct ttm_operation_ctx *ctx)
{
unsigned i, j;
int r;
- r = ttm_pool_populate(&tt->ttm, ctx);
+ r = ttm_pool_populate(tt, ctx);
if (r)
return r;
- for (i = 0; i < tt->ttm.num_pages; ++i) {
- struct page *p = tt->ttm.pages[i];
+ for (i = 0; i < tt->num_pages; ++i) {
+ struct page *p = tt->pages[i];
size_t num_pages = 1;
- for (j = i + 1; j < tt->ttm.num_pages; ++j) {
- if (++p != tt->ttm.pages[j])
+ for (j = i + 1; j < tt->num_pages; ++j) {
+ if (++p != tt->pages[j])
break;
++num_pages;
}
- tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
+ tt->dma_address[i] = dma_map_page(dev, tt->pages[i],
0, num_pages * PAGE_SIZE,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, tt->dma_address[i])) {
@@ -1124,7 +1111,7 @@ int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
PAGE_SIZE, DMA_BIDIRECTIONAL);
tt->dma_address[i] = 0;
}
- ttm_pool_unpopulate(&tt->ttm);
+ ttm_pool_unpopulate(tt);
return -EFAULT;
}
@@ -1137,21 +1124,21 @@ int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
}
EXPORT_SYMBOL(ttm_populate_and_map_pages);
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt)
{
unsigned i, j;
- for (i = 0; i < tt->ttm.num_pages;) {
- struct page *p = tt->ttm.pages[i];
+ for (i = 0; i < tt->num_pages;) {
+ struct page *p = tt->pages[i];
size_t num_pages = 1;
- if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
+ if (!tt->dma_address[i] || !tt->pages[i]) {
++i;
continue;
}
- for (j = i + 1; j < tt->ttm.num_pages; ++j) {
- if (++p != tt->ttm.pages[j])
+ for (j = i + 1; j < tt->num_pages; ++j) {
+ if (++p != tt->pages[j])
break;
++num_pages;
@@ -1162,7 +1149,7 @@ void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
i += num_pages;
}
- ttm_pool_unpopulate(&tt->ttm);
+ ttm_pool_unpopulate(tt);
}
EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 5e2df11685e7..c0353c25efd6 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -49,7 +49,8 @@
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_set_memory.h>
+
+#include "ttm_set_memory.h"
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 4
@@ -324,15 +325,15 @@ static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
}
return d_page;
}
-static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
+static enum pool_type ttm_to_type(int flags, enum ttm_caching cstate)
{
enum pool_type type = IS_UNDEFINED;
if (flags & TTM_PAGE_FLAG_DMA32)
type |= IS_DMA32;
- if (cstate == tt_cached)
+ if (cstate == ttm_cached)
type |= IS_CACHED;
- else if (cstate == tt_uncached)
+ else if (cstate == ttm_uncached)
type |= IS_UC;
else
type |= IS_WC;
@@ -662,7 +663,7 @@ static struct dma_pool *ttm_dma_find_pool(struct device *dev,
* are pages that have changed their caching state already put them to the
* pool.
*/
-static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
+static void ttm_dma_handle_caching_failure(struct dma_pool *pool,
struct list_head *d_pages,
struct page **failed_pages,
unsigned cpages)
@@ -733,7 +734,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
r = ttm_set_pages_caching(pool, caching_array,
cpages);
if (r)
- ttm_dma_handle_caching_state_failure(
+ ttm_dma_handle_caching_failure(
pool, d_pages, caching_array,
cpages);
}
@@ -759,7 +760,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
r = ttm_set_pages_caching(pool, caching_array,
cpages);
if (r) {
- ttm_dma_handle_caching_state_failure(
+ ttm_dma_handle_caching_failure(
pool, d_pages, caching_array,
cpages);
goto out;
@@ -772,7 +773,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
if (cpages) {
r = ttm_set_pages_caching(pool, caching_array, cpages);
if (r)
- ttm_dma_handle_caching_state_failure(pool, d_pages,
+ ttm_dma_handle_caching_failure(pool, d_pages,
caching_array, cpages);
}
out:
@@ -831,11 +832,10 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
* return dma_page pointer if success, otherwise NULL.
*/
static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
- struct ttm_dma_tt *ttm_dma,
+ struct ttm_tt *ttm,
unsigned index)
{
struct dma_page *d_page = NULL;
- struct ttm_tt *ttm = &ttm_dma->ttm;
unsigned long irq_flags;
int count;
@@ -844,8 +844,8 @@ static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
if (count) {
d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
ttm->pages[index] = d_page->p;
- ttm_dma->dma_address[index] = d_page->dma;
- list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
+ ttm->dma_address[index] = d_page->dma;
+ list_move_tail(&d_page->page_list, &ttm->pages_list);
pool->npages_in_use += 1;
pool->npages_free -= 1;
}
@@ -853,9 +853,8 @@ static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
return d_page;
}
-static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
+static gfp_t ttm_dma_pool_gfp_flags(struct ttm_tt *ttm, bool huge)
{
- struct ttm_tt *ttm = &ttm_dma->ttm;
gfp_t gfp_flags;
if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
@@ -882,11 +881,10 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
* On success pages list will hold count number of correctly
* cached pages. On failure will hold the negative return value (-ENOMEM, etc).
*/
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev,
struct ttm_operation_ctx *ctx)
{
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
- struct ttm_tt *ttm = &ttm_dma->ttm;
unsigned long num_pages = ttm->num_pages;
struct dma_pool *pool;
struct dma_page *d_page;
@@ -900,10 +898,10 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
return -ENOMEM;
- INIT_LIST_HEAD(&ttm_dma->pages_list);
+ INIT_LIST_HEAD(&ttm->pages_list);
i = 0;
- type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+ type = ttm_to_type(ttm->page_flags, ttm->caching);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
@@ -911,7 +909,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
pool = ttm_dma_find_pool(dev, type | IS_HUGE);
if (!pool) {
- gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);
+ gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, true);
pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
if (IS_ERR_OR_NULL(pool))
@@ -921,21 +919,21 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
while (num_pages >= HPAGE_PMD_NR) {
unsigned j;
- d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+ d_page = ttm_dma_pool_get_pages(pool, ttm, i);
if (!d_page)
break;
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
pool->size, ctx);
if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm_dma, dev);
+ ttm_dma_unpopulate(ttm, dev);
return -ENOMEM;
}
d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
ttm->pages[j] = ttm->pages[j - 1] + 1;
- ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
+ ttm->dma_address[j] = ttm->dma_address[j - 1] +
PAGE_SIZE;
}
@@ -948,7 +946,7 @@ skip_huge:
pool = ttm_dma_find_pool(dev, type);
if (!pool) {
- gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);
+ gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, false);
pool = ttm_dma_pool_init(dev, gfp_flags, type);
if (IS_ERR_OR_NULL(pool))
@@ -956,16 +954,16 @@ skip_huge:
}
while (num_pages) {
- d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+ d_page = ttm_dma_pool_get_pages(pool, ttm, i);
if (!d_page) {
- ttm_dma_unpopulate(ttm_dma, dev);
+ ttm_dma_unpopulate(ttm, dev);
return -ENOMEM;
}
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
pool->size, ctx);
if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm_dma, dev);
+ ttm_dma_unpopulate(ttm, dev);
return -ENOMEM;
}
@@ -974,24 +972,14 @@ skip_huge:
--num_pages;
}
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0)) {
- ttm_dma_unpopulate(ttm_dma, dev);
- return ret;
- }
- }
-
- ttm_tt_set_populated(ttm);
return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
{
struct ttm_mem_global *mem_glob = &ttm_mem_glob;
- struct ttm_tt *ttm = &ttm_dma->ttm;
struct dma_pool *pool;
struct dma_page *d_page, *next;
enum pool_type type;
@@ -999,13 +987,13 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
unsigned count, i, npages = 0;
unsigned long irq_flags;
- type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+ type = ttm_to_type(ttm->page_flags, ttm->caching);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
pool = ttm_dma_find_pool(dev, type | IS_HUGE);
if (pool) {
count = 0;
- list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+ list_for_each_entry_safe(d_page, next, &ttm->pages_list,
page_list) {
if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
continue;
@@ -1031,11 +1019,11 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
return;
is_cached = (ttm_dma_find_pool(pool->dev,
- ttm_to_type(ttm->page_flags, tt_cached)) == pool);
+ ttm_to_type(ttm->page_flags, ttm_cached)) == pool);
/* make sure the pages array matches the list and count the number of pages */
count = 0;
- list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+ list_for_each_entry_safe(d_page, next, &ttm->pages_list,
page_list) {
ttm->pages[count] = d_page->p;
count++;
@@ -1056,7 +1044,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
pool->nfrees += count;
} else {
pool->npages_free += count;
- list_splice(&ttm_dma->pages_list, &pool->free_list);
+ list_splice(&ttm->pages_list, &pool->free_list);
/*
* Wait to have at least NUM_PAGES_TO_ALLOC number of pages
* to free in order to minimize calls to set_memory_wb().
@@ -1067,16 +1055,15 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
}
spin_unlock_irqrestore(&pool->lock, irq_flags);
- INIT_LIST_HEAD(&ttm_dma->pages_list);
+ INIT_LIST_HEAD(&ttm->pages_list);
for (i = 0; i < ttm->num_pages; i++) {
ttm->pages[i] = NULL;
- ttm_dma->dma_address[i] = 0;
+ ttm->dma_address[i] = 0;
}
/* shrink pool if necessary (only on !is_cached pools)*/
if (npages)
ttm_dma_page_pool_free(pool, npages, false);
- ttm_tt_set_unpopulated(ttm);
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 1da0e277c511..ea77919569a2 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -149,7 +149,7 @@ int ttm_range_man_fini(struct ttm_bo_device *bdev,
ttm_resource_manager_set_used(man, false);
- ret = ttm_resource_manager_force_list_clean(bdev, man);
+ ret = ttm_resource_manager_evict_all(bdev, man);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index b325b9264203..4ebc043e2867 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -75,16 +75,16 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man,
EXPORT_SYMBOL(ttm_resource_manager_init);
/*
- * ttm_resource_manager_force_list_clean
+ * ttm_resource_manager_evict_all
*
* @bdev - device to use
* @man - manager to use
*
- * Force all the objects out of a memory manager until clean.
+ * Evict all the objects out of a memory manager until it is empty.
* Part of memory manager cleanup sequence.
*/
-int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev,
- struct ttm_resource_manager *man)
+int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev,
+ struct ttm_resource_manager *man)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
@@ -126,7 +126,7 @@ int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev,
return 0;
}
-EXPORT_SYMBOL(ttm_resource_manager_force_list_clean);
+EXPORT_SYMBOL(ttm_resource_manager_evict_all);
/**
* ttm_resource_manager_debug
diff --git a/include/drm/ttm/ttm_set_memory.h b/drivers/gpu/drm/ttm/ttm_set_memory.h
index 7c492b49e38c..2343c18a6133 100644
--- a/include/drm/ttm/ttm_set_memory.h
+++ b/drivers/gpu/drm/ttm/ttm_set_memory.h
@@ -57,91 +57,25 @@ static inline int ttm_set_pages_wb(struct page *page, int numpages)
return set_pages_wb(page, numpages);
}
-static inline int ttm_set_pages_wc(struct page *page, int numpages)
-{
- unsigned long addr = (unsigned long)page_address(page);
-
- return set_memory_wc(addr, numpages);
-}
-
-static inline int ttm_set_pages_uc(struct page *page, int numpages)
-{
- return set_pages_uc(page, numpages);
-}
-
#else /* for CONFIG_X86 */
-#if IS_ENABLED(CONFIG_AGP)
-
-#include <asm/agp.h>
-
static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
{
- int i;
-
- for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
return 0;
}
static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
{
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
return 0;
}
static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
{
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
return 0;
}
static inline int ttm_set_pages_wb(struct page *page, int numpages)
{
- int i;
-
- for (i = 0; i < numpages; i++)
- unmap_page_from_agp(page++);
- return 0;
-}
-
-#else /* for CONFIG_AGP */
-
-static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_wb(struct page *page, int numpages)
-{
- return 0;
-}
-
-#endif /* for CONFIG_AGP */
-
-static inline int ttm_set_pages_wc(struct page *page, int numpages)
-{
- return 0;
-}
-
-static inline int ttm_set_pages_uc(struct page *page, int numpages)
-{
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index f43fa69a1e65..65c4254eea5c 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -38,7 +38,6 @@
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-#include <drm/ttm/ttm_set_memory.h>
/**
* Allocates a ttm structure for the given BO.
@@ -93,21 +92,22 @@ static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
return 0;
}
-static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
- sizeof(*ttm->ttm.pages) +
- sizeof(*ttm->dma_address),
- GFP_KERNEL | __GFP_ZERO);
- if (!ttm->ttm.pages)
+ ttm->pages = kvmalloc_array(ttm->num_pages,
+ sizeof(*ttm->pages) +
+ sizeof(*ttm->dma_address),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!ttm->pages)
return -ENOMEM;
- ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
+
+ ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
return 0;
}
-static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
+ ttm->dma_address = kvmalloc_array(ttm->num_pages,
sizeof(*ttm->dma_address),
GFP_KERNEL | __GFP_ZERO);
if (!ttm->dma_address)
@@ -115,104 +115,11 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
return 0;
}
-static int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_old,
- enum ttm_caching_state c_new)
-{
- int ret = 0;
-
- if (PageHighMem(p))
- return 0;
-
- if (c_old != tt_cached) {
- /* p isn't in the default caching state, set it to
- * writeback first to free its current memtype. */
-
- ret = ttm_set_pages_wb(p, 1);
- if (ret)
- return ret;
- }
-
- if (c_new == tt_wc)
- ret = ttm_set_pages_wc(p, 1);
- else if (c_new == tt_uncached)
- ret = ttm_set_pages_uc(p, 1);
-
- return ret;
-}
-
-/*
- * Change caching policy for the linear kernel map
- * for range of pages in a ttm.
- */
-
-static int ttm_tt_set_caching(struct ttm_tt *ttm,
- enum ttm_caching_state c_state)
-{
- int i, j;
- struct page *cur_page;
- int ret;
-
- if (ttm->caching_state == c_state)
- return 0;
-
- if (!ttm_tt_is_populated(ttm)) {
- /* Change caching but don't populate */
- ttm->caching_state = c_state;
- return 0;
- }
-
- if (ttm->caching_state == tt_cached)
- drm_clflush_pages(ttm->pages, ttm->num_pages);
-
- for (i = 0; i < ttm->num_pages; ++i) {
- cur_page = ttm->pages[i];
- if (likely(cur_page != NULL)) {
- ret = ttm_tt_set_page_caching(cur_page,
- ttm->caching_state,
- c_state);
- if (unlikely(ret != 0))
- goto out_err;
- }
- }
-
- ttm->caching_state = c_state;
-
- return 0;
-
-out_err:
- for (j = 0; j < i; ++j) {
- cur_page = ttm->pages[j];
- if (likely(cur_page != NULL)) {
- (void)ttm_tt_set_page_caching(cur_page, c_state,
- ttm->caching_state);
- }
- }
-
- return ret;
-}
-
-int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
-{
- enum ttm_caching_state state;
-
- if (placement & TTM_PL_FLAG_WC)
- state = tt_wc;
- else if (placement & TTM_PL_FLAG_UNCACHED)
- state = tt_uncached;
- else
- state = tt_cached;
-
- return ttm_tt_set_caching(ttm, state);
-}
-EXPORT_SYMBOL(ttm_tt_set_placement_caching);
-
void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
ttm_tt_unpopulate(bdev, ttm);
- if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
- ttm->swap_storage)
+ if (ttm->swap_storage)
fput(ttm->swap_storage);
ttm->swap_storage = NULL;
@@ -226,20 +133,23 @@ void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
static void ttm_tt_init_fields(struct ttm_tt *ttm,
struct ttm_buffer_object *bo,
- uint32_t page_flags)
+ uint32_t page_flags,
+ enum ttm_caching caching)
{
ttm->num_pages = bo->num_pages;
- ttm->caching_state = tt_cached;
+ ttm->caching = ttm_cached;
ttm->page_flags = page_flags;
- ttm_tt_set_unpopulated(ttm);
+ ttm->dma_address = NULL;
ttm->swap_storage = NULL;
ttm->sg = bo->sg;
+ INIT_LIST_HEAD(&ttm->pages_list);
+ ttm->caching = caching;
}
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags)
+ uint32_t page_flags, enum ttm_caching caching)
{
- ttm_tt_init_fields(ttm, bo, page_flags);
+ ttm_tt_init_fields(ttm, bo, page_flags, caching);
if (ttm_tt_alloc_page_directory(ttm)) {
pr_err("Failed allocating page table\n");
@@ -251,20 +161,21 @@ EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_fini(struct ttm_tt *ttm)
{
- kvfree(ttm->pages);
+ if (ttm->pages)
+ kvfree(ttm->pages);
+ else
+ kvfree(ttm->dma_address);
ttm->pages = NULL;
+ ttm->dma_address = NULL;
}
EXPORT_SYMBOL(ttm_tt_fini);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
- uint32_t page_flags)
+int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ uint32_t page_flags, enum ttm_caching caching)
{
- struct ttm_tt *ttm = &ttm_dma->ttm;
-
- ttm_tt_init_fields(ttm, bo, page_flags);
+ ttm_tt_init_fields(ttm, bo, page_flags, caching);
- INIT_LIST_HEAD(&ttm_dma->pages_list);
- if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
+ if (ttm_dma_tt_alloc_page_directory(ttm)) {
pr_err("Failed allocating page table\n");
return -ENOMEM;
}
@@ -272,19 +183,17 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_dma_tt_init);
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
- uint32_t page_flags)
+int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
+ uint32_t page_flags, enum ttm_caching caching)
{
- struct ttm_tt *ttm = &ttm_dma->ttm;
int ret;
- ttm_tt_init_fields(ttm, bo, page_flags);
+ ttm_tt_init_fields(ttm, bo, page_flags, caching);
- INIT_LIST_HEAD(&ttm_dma->pages_list);
if (page_flags & TTM_PAGE_FLAG_SG)
- ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
+ ret = ttm_sg_tt_alloc_page_directory(ttm);
else
- ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
+ ret = ttm_dma_tt_alloc_page_directory(ttm);
if (ret) {
pr_err("Failed allocating page table\n");
return -ENOMEM;
@@ -293,92 +202,73 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_sg_tt_init);
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
-{
- struct ttm_tt *ttm = &ttm_dma->ttm;
-
- if (ttm->pages)
- kvfree(ttm->pages);
- else
- kvfree(ttm_dma->dma_address);
- ttm->pages = NULL;
- ttm_dma->dma_address = NULL;
-}
-EXPORT_SYMBOL(ttm_dma_tt_fini);
-
int ttm_tt_swapin(struct ttm_tt *ttm)
{
struct address_space *swap_space;
struct file *swap_storage;
struct page *from_page;
struct page *to_page;
- int i;
- int ret = -ENOMEM;
+ gfp_t gfp_mask;
+ int i, ret;
swap_storage = ttm->swap_storage;
BUG_ON(swap_storage == NULL);
swap_space = swap_storage->f_mapping;
+ gfp_mask = mapping_gfp_mask(swap_space);
+ if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+ gfp_mask |= __GFP_RETRY_MAYFAIL;
for (i = 0; i < ttm->num_pages; ++i) {
- gfp_t gfp_mask = mapping_gfp_mask(swap_space);
-
- gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
- from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
-
+ from_page = shmem_read_mapping_page_gfp(swap_space, i,
+ gfp_mask);
if (IS_ERR(from_page)) {
ret = PTR_ERR(from_page);
goto out_err;
}
to_page = ttm->pages[i];
- if (unlikely(to_page == NULL))
+ if (unlikely(to_page == NULL)) {
+ ret = -ENOMEM;
goto out_err;
+ }
copy_highpage(to_page, from_page);
put_page(from_page);
}
- if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
- fput(swap_storage);
+ fput(swap_storage);
ttm->swap_storage = NULL;
ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
return 0;
+
out_err:
return ret;
}
-int ttm_tt_swapout(struct ttm_bo_device *bdev,
- struct ttm_tt *ttm, struct file *persistent_swap_storage)
+int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
struct address_space *swap_space;
struct file *swap_storage;
struct page *from_page;
struct page *to_page;
- int i;
- int ret = -ENOMEM;
-
- BUG_ON(ttm->caching_state != tt_cached);
-
- if (!persistent_swap_storage) {
- swap_storage = shmem_file_setup("ttm swap",
- ttm->num_pages << PAGE_SHIFT,
- 0);
- if (IS_ERR(swap_storage)) {
- pr_err("Failed allocating swap storage\n");
- return PTR_ERR(swap_storage);
- }
- } else {
- swap_storage = persistent_swap_storage;
+ gfp_t gfp_mask;
+ int i, ret;
+
+ swap_storage = shmem_file_setup("ttm swap",
+ ttm->num_pages << PAGE_SHIFT,
+ 0);
+ if (IS_ERR(swap_storage)) {
+ pr_err("Failed allocating swap storage\n");
+ return PTR_ERR(swap_storage);
}
swap_space = swap_storage->f_mapping;
+ gfp_mask = mapping_gfp_mask(swap_space);
+ if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+ gfp_mask |= __GFP_RETRY_MAYFAIL;
for (i = 0; i < ttm->num_pages; ++i) {
- gfp_t gfp_mask = mapping_gfp_mask(swap_space);
-
- gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
-
from_page = ttm->pages[i];
if (unlikely(from_page == NULL))
continue;
@@ -397,13 +287,11 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev,
ttm_tt_unpopulate(bdev, ttm);
ttm->swap_storage = swap_storage;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
- if (persistent_swap_storage)
- ttm->page_flags |= TTM_PAGE_FLAG_PERSISTENT_SWAP;
return 0;
+
out_err:
- if (!persistent_swap_storage)
- fput(swap_storage);
+ fput(swap_storage);
return ret;
}
@@ -434,9 +322,20 @@ int ttm_tt_populate(struct ttm_bo_device *bdev,
ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
else
ret = ttm_pool_populate(ttm, ctx);
- if (!ret)
- ttm_tt_add_mapping(bdev, ttm);
- return ret;
+ if (ret)
+ return ret;
+
+ ttm_tt_add_mapping(bdev, ttm);
+ ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0)) {
+ ttm_tt_unpopulate(bdev, ttm);
+ return ret;
+ }
+ }
+
+ return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);
@@ -465,4 +364,5 @@ void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
bdev->driver->ttm_tt_unpopulate(bdev, ttm);
else
ttm_pool_unpopulate(ttm);
+ ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
}
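/*
 * Editor's note: a minimal sketch, not part of this patch. Swap-in is now
 * driven from ttm_tt_populate() itself (see above), so a driver's populate
 * hook only fills the pages and never calls ttm_tt_swapin() directly
 * (sketch_tt_populate is a hypothetical name):
 */
static int sketch_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
			      struct ttm_operation_ctx *ctx)
{
	/* the core swaps pages back in after this returns 0 */
	return ttm_pool_populate(ttm, ctx);
}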
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index c3aa39bd38ec..b5259cb1383f 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -200,8 +200,8 @@ static int tve200_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- ret = -EINVAL;
+ if (irq < 0) {
+ ret = irq;
goto clk_disable;
}
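/*
 * Editor's note: platform_get_irq() reports failure through a negative
 * errno return, so the old "if (!irq)" test let real errors through while
 * catching only a zero return. The canonical pattern is:
 *
 *	irq = platform_get_irq(pdev, 0);
 *	if (irq < 0)
 *		return irq;
 */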
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 4fcc0a542b8a..931c55126148 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -213,12 +213,12 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
}
static void vbox_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
}
static void vbox_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
}
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 74ceebd62fbc..f432278173cd 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -21,6 +21,8 @@
#include "vc4_drv.h"
#include "uapi/drm/vc4_drm.h"
+static vm_fault_t vc4_fault(struct vm_fault *vmf);
+
static const char * const bo_type_names[] = {
"kernel",
"V3D",
@@ -374,6 +376,21 @@ out:
return bo;
}
+static const struct vm_operations_struct vc4_vm_ops = {
+ .fault = vc4_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs vc4_gem_object_funcs = {
+ .free = vc4_free_object,
+ .export = vc4_prime_export,
+ .get_sg_table = drm_gem_cma_prime_get_sg_table,
+ .vmap = vc4_prime_vmap,
+ .vunmap = drm_gem_cma_prime_vunmap,
+ .vm_ops = &vc4_vm_ops,
+};
+
/**
* vc4_gem_create_object - Implementation of driver->gem_create_object.
* @dev: DRM device
@@ -400,6 +417,8 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
mutex_unlock(&vc4->bo_lock);
+ bo->base.base.funcs = &vc4_gem_object_funcs;
+
return &bo->base.base;
}
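/*
 * Editor's note: attaching drm_gem_object_funcs to every BO here is what
 * allows dropping the equivalent callbacks from struct drm_driver in
 * vc4_drv.c below; the DRM core now dispatches through obj->funcs instead.
 */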
@@ -684,7 +703,7 @@ struct dma_buf * vc4_prime_export(struct drm_gem_object *obj, int flags)
return dmabuf;
}
-vm_fault_t vc4_fault(struct vm_fault *vmf)
+static vm_fault_t vc4_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 482219fb4db2..f04f5cc8c839 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -472,8 +472,10 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc)
}
static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct vc4_crtc_state *old_vc4_state = to_vc4_crtc_state(old_state);
struct drm_device *dev = crtc->dev;
@@ -499,8 +501,10 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc,
}
static void vc4_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct drm_device *dev = crtc->dev;
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc);
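The CRTC hunks here and below all follow the same mechanical change: atomic_enable/atomic_disable now take the full struct drm_atomic_state, and a driver that needs the per-CRTC state looks it up explicitly. The generic form, as a sketch:

#include <drm/drm_atomic.h>

static void foo_crtc_atomic_enable(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	/* The former single argument is now derived on demand; the new
	 * state is reachable the same way, which the old signature
	 * never offered.
	 */
	struct drm_crtc_state *old_state =
		drm_atomic_get_old_crtc_state(state, crtc);
	struct drm_crtc_state *new_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	/* ... program hardware from old_state/new_state ... */
}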
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index a17aa1db11b6..8f10f609e4f8 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -140,12 +140,6 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file)
kfree(vc4file);
}
-static const struct vm_operations_struct vc4_vm_ops = {
- .fault = vc4_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static const struct file_operations vc4_drm_fops = {
.owner = THIS_MODULE,
.open = drm_open,
@@ -195,16 +189,10 @@ static struct drm_driver vc4_drm_driver = {
#endif
.gem_create_object = vc4_create_object,
- .gem_free_object_unlocked = vc4_free_object,
- .gem_vm_ops = &vc4_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = vc4_prime_export,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = vc4_prime_import_sg_table,
- .gem_prime_vmap = vc4_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = vc4_prime_mmap,
.dumb_create = vc4_dumb_create,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 66d4fb16db8f..7003e7f14a48 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -801,7 +801,6 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index eaf276978ee7..19aab4e7e209 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1246,10 +1246,12 @@ reset_fifo_and_return:
return ret;
}
+static const struct component_ops vc4_dsi_ops;
static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct vc4_dsi *dsi = host_to_dsi(host);
+ int ret;
dsi->lanes = device->lanes;
dsi->channel = device->channel;
@@ -1284,6 +1286,12 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
return 0;
}
+ ret = component_add(&dsi->pdev->dev, &vc4_dsi_ops);
+ if (ret) {
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+ return ret;
+ }
+
return 0;
}
@@ -1662,7 +1670,6 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct vc4_dsi *dsi;
- int ret;
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
@@ -1670,26 +1677,10 @@ static int vc4_dsi_dev_probe(struct platform_device *pdev)
dev_set_drvdata(dev, dsi);
dsi->pdev = pdev;
-
- /* Note, the initialization sequence for DSI and panels is
- * tricky. The component bind above won't get past its
- * -EPROBE_DEFER until the panel/bridge probes. The
- * panel/bridge will return -EPROBE_DEFER until it has a
- * mipi_dsi_host to register its device to. So, we register
- * the host during pdev probe time, so vc4 as a whole can then
- * -EPROBE_DEFER its component bind process until the panel
- * successfully attaches.
- */
dsi->dsi_host.ops = &vc4_dsi_host_ops;
dsi->dsi_host.dev = dev;
mipi_dsi_host_register(&dsi->dsi_host);
- ret = component_add(&pdev->dev, &vc4_dsi_ops);
- if (ret) {
- mipi_dsi_host_unregister(&dsi->dsi_host);
- return ret;
- }
-
return 0;
}
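Moving component_add() from dev-probe time into the DSI host-attach callback breaks a probe-deferral cycle: the panel defers until a mipi_dsi_host exists, while vc4's component bind would otherwise defer on the panel. Condensed (a restatement of the hunks above, not new code):

/* probe():  register only the DSI host, so the panel can find it. */
	mipi_dsi_host_register(&dsi->dsi_host);

/* attach(): the panel has probed; only now join the component master. */
	ret = component_add(&dsi->pdev->dev, &vc4_dsi_ops);
	if (ret)
		mipi_dsi_host_unregister(&dsi->dsi_host);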
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 849dcafbfff1..e0e0b72ea65c 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -13,6 +13,7 @@
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_cma_helper.h>
@@ -401,15 +402,19 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc,
}
static void vc4_txp_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
drm_crtc_vblank_on(crtc);
vc4_hvs_atomic_enable(crtc, old_state);
}
static void vc4_txp_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct drm_device *dev = crtc->dev;
/* Disable vblank irq handling before crtc is disabled. */
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index cb884c890065..fa54a6d1403d 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -50,6 +50,8 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
+static const struct drm_gem_object_funcs vgem_gem_object_funcs;
+
static struct vgem_device {
struct drm_device drm;
struct platform_device *platform;
@@ -167,6 +169,8 @@ static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
if (!obj)
return ERR_PTR(-ENOMEM);
+ obj->base.funcs = &vgem_gem_object_funcs;
+
ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
if (ret) {
kfree(obj);
@@ -401,12 +405,20 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
return 0;
}
+static const struct drm_gem_object_funcs vgem_gem_object_funcs = {
+ .free = vgem_gem_free_object,
+ .pin = vgem_prime_pin,
+ .unpin = vgem_prime_unpin,
+ .get_sg_table = vgem_prime_get_sg_table,
+ .vmap = vgem_prime_vmap,
+ .vunmap = vgem_prime_vunmap,
+ .vm_ops = &vgem_gem_vm_ops,
+};
+
static struct drm_driver vgem_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
.open = vgem_open,
.postclose = vgem_postclose,
- .gem_free_object_unlocked = vgem_gem_free_object,
- .gem_vm_ops = &vgem_gem_vm_ops,
.ioctls = vgem_ioctls,
.num_ioctls = ARRAY_SIZE(vgem_ioctls),
.fops = &vgem_driver_fops,
@@ -415,13 +427,8 @@ static struct drm_driver vgem_driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_pin = vgem_prime_pin,
- .gem_prime_unpin = vgem_prime_unpin,
.gem_prime_import = vgem_prime_import,
.gem_prime_import_sg_table = vgem_prime_import_sg_table,
- .gem_prime_get_sg_table = vgem_prime_get_sg_table,
- .gem_prime_vmap = vgem_prime_vmap,
- .gem_prime_vunmap = vgem_prime_vunmap,
.gem_prime_mmap = vgem_prime_mmap,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 45cc9e900260..dae1bacd86c1 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -129,9 +129,9 @@ int via_mem_alloc(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
dev_priv->agp_initialized)) {
+ mutex_unlock(&dev->struct_mutex);
DRM_ERROR
("Attempt to allocate from uninitialized memory manager.\n");
- mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile
index 92aa2b3d349d..b99fa4a73b68 100644
--- a/drivers/gpu/drm/virtio/Makefile
+++ b/drivers/gpu/drm/virtio/Makefile
@@ -3,7 +3,7 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \
+virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o virtgpu_vram.o \
virtgpu_display.o virtgpu_vq.o \
virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \
virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index d5b0c543bd6d..f336a8fa6666 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -42,15 +42,21 @@ static void virtio_add_int(struct seq_file *m, const char *name,
static int virtio_gpu_features(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
virtio_add_bool(m, "edid", vgdev->has_edid);
virtio_add_bool(m, "indirect", vgdev->has_indirect);
virtio_add_bool(m, "resource uuid", vgdev->has_resource_assign_uuid);
+ virtio_add_bool(m, "blob resources", vgdev->has_resource_blob);
virtio_add_int(m, "cap sets", vgdev->num_capsets);
virtio_add_int(m, "scanouts", vgdev->num_scanouts);
+ if (vgdev->host_visible_region.len) {
+ seq_printf(m, "%-16s : 0x%lx +0x%lx\n", "host visible region",
+ (unsigned long)vgdev->host_visible_region.addr,
+ (unsigned long)vgdev->host_visible_region.len);
+ }
return 0;
}
@@ -66,9 +72,27 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
return 0;
}
+static int
+virtio_gpu_debugfs_host_visible_mm(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
+ struct drm_printer p;
+
+ if (!vgdev->has_host_visible) {
+ seq_puts(m, "Host allocations not visible to guest\n");
+ return 0;
+ }
+
+ p = drm_seq_file_printer(m);
+ drm_mm_print(&vgdev->host_visible_mm, &p);
+ return 0;
+}
+
static struct drm_info_list virtio_gpu_debugfs_list[] = {
{ "virtio-gpu-features", virtio_gpu_features },
{ "virtio-gpu-irq-fence", virtio_gpu_debugfs_irq_info, 0, NULL },
+ { "virtio-gpu-host-visible-mm", virtio_gpu_debugfs_host_visible_mm },
};
#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index f84b7e61311b..48b3194ee051 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -95,12 +95,12 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
}
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
}
static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index b039f493bda9..86330f1ade72 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -166,6 +166,7 @@ static unsigned int features[] = {
#endif
VIRTIO_GPU_F_EDID,
VIRTIO_GPU_F_RESOURCE_UUID,
+ VIRTIO_GPU_F_RESOURCE_BLOB,
};
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
@@ -203,7 +204,6 @@ static struct drm_driver driver = {
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_mmap = drm_gem_prime_mmap,
- .gem_prime_export = virtgpu_gem_prime_export,
.gem_prime_import = virtgpu_gem_prime_import,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 55c34b4fc3e9..3c0e17212c33 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -35,6 +35,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
@@ -49,18 +50,21 @@
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0
-#define UUID_INITIALIZING 0
-#define UUID_INITIALIZED 1
-#define UUID_INITIALIZATION_FAILED 2
+#define STATE_INITIALIZING 0
+#define STATE_OK 1
+#define STATE_ERR 2
struct virtio_gpu_object_params {
- uint32_t format;
- uint32_t width;
- uint32_t height;
unsigned long size;
bool dumb;
/* 3d */
bool virgl;
+ bool blob;
+
+ /* classic resources only */
+ uint32_t format;
+ uint32_t width;
+ uint32_t height;
uint32_t target;
uint32_t bind;
uint32_t depth;
@@ -68,6 +72,12 @@ struct virtio_gpu_object_params {
uint32_t last_level;
uint32_t nr_samples;
uint32_t flags;
+
+ /* blob resources only */
+ uint32_t ctx_id;
+ uint32_t blob_mem;
+ uint32_t blob_flags;
+ uint64_t blob_id;
};
struct virtio_gpu_object {
@@ -75,6 +85,8 @@ struct virtio_gpu_object {
uint32_t hw_res_handle;
bool dumb;
bool created;
+ bool host3d_blob, guest_blob;
+ uint32_t blob_mem, blob_flags;
int uuid_state;
uuid_t uuid;
@@ -88,9 +100,19 @@ struct virtio_gpu_object_shmem {
uint32_t mapped;
};
+struct virtio_gpu_object_vram {
+ struct virtio_gpu_object base;
+ uint32_t map_state;
+ uint32_t map_info;
+ struct drm_mm_node vram_node;
+};
+
#define to_virtio_gpu_shmem(virtio_gpu_object) \
container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)
+#define to_virtio_gpu_vram(virtio_gpu_object) \
+ container_of((virtio_gpu_object), struct virtio_gpu_object_vram, base)
+
struct virtio_gpu_object_array {
struct ww_acquire_ctx ticket;
struct list_head next;
@@ -208,6 +230,10 @@ struct virtio_gpu_device {
bool has_edid;
bool has_indirect;
bool has_resource_assign_uuid;
+ bool has_resource_blob;
+ bool has_host_visible;
+ struct virtio_shm_region host_visible_region;
+ struct drm_mm host_visible_mm;
struct work_struct config_changed_work;
@@ -219,8 +245,10 @@ struct virtio_gpu_device {
uint32_t num_capsets;
struct list_head cap_cache;
- /* protects resource state when exporting */
+ /* protects uuid state when exporting */
spinlock_t resource_export_lock;
+ /* protects map state and host_visible_mm */
+ spinlock_t host_visible_lock;
};
struct virtio_gpu_fpriv {
@@ -229,8 +257,8 @@ struct virtio_gpu_fpriv {
struct mutex context_lock;
};
-/* virtgpu_ioctl.c */
-#define DRM_VIRTIO_NUM_IOCTLS 10
+/* virtgpu_ioctl.c */
+#define DRM_VIRTIO_NUM_IOCTLS 11
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
@@ -323,12 +351,16 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
+ uint32_t stride,
+ uint32_t layer_stride,
struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
+ uint32_t stride,
+ uint32_t layer_stride,
struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
@@ -351,6 +383,26 @@ int
virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs);
+int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs, uint64_t offset);
+
+void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo);
+
+void
+virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_mem_entry *ents,
+ uint32_t nents);
+void
+virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
+ uint32_t scanout_id,
+ struct virtio_gpu_object *bo,
+ struct drm_framebuffer *fb,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y);
+
/* virtgpu_display.c */
int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
@@ -381,7 +433,11 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
+int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
+ uint32_t *resid);
/* virtgpu_prime.c */
+int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo);
struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
int flags);
struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
@@ -395,4 +451,9 @@ struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
/* virtgpu_debugfs.c */
void virtio_gpu_debugfs_init(struct drm_minor *minor);
+/* virtgpu_vram.c */
+bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
+int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object **bo_ptr);
#endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index c8da7adc6b30..5417f365d1a3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -34,6 +34,10 @@
#include "virtgpu_drv.h"
+#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \
+ VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
+ VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
+
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
@@ -208,11 +212,20 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
switch (param->param) {
case VIRTGPU_PARAM_3D_FEATURES:
- value = vgdev->has_virgl_3d == true ? 1 : 0;
+ value = vgdev->has_virgl_3d ? 1 : 0;
break;
case VIRTGPU_PARAM_CAPSET_QUERY_FIX:
value = 1;
break;
+ case VIRTGPU_PARAM_RESOURCE_BLOB:
+ value = vgdev->has_resource_blob ? 1 : 0;
+ break;
+ case VIRTGPU_PARAM_HOST_VISIBLE:
+ value = vgdev->has_host_visible ? 1 : 0;
+ break;
+ case VIRTGPU_PARAM_CROSS_DEVICE:
+ value = vgdev->has_resource_assign_uuid ? 1 : 0;
+ break;
default:
return -EINVAL;
}
@@ -301,6 +314,9 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
ri->size = qobj->base.base.size;
ri->res_handle = qobj->hw_res_handle;
+ if (qobj->host3d_blob || qobj->guest_blob)
+ ri->blob_mem = qobj->blob_mem;
+
drm_gem_object_put(gobj);
return 0;
}
@@ -312,6 +328,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_from_host *args = data;
+ struct virtio_gpu_object *bo;
struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
int ret;
@@ -325,6 +342,17 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (objs == NULL)
return -ENOENT;
+ bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ if (bo->guest_blob && !bo->host3d_blob) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
+ if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
ret = virtio_gpu_array_lock_resv(objs);
if (ret != 0)
goto err_put_free;
@@ -334,9 +362,10 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
ret = -ENOMEM;
goto err_unlock;
}
+
virtio_gpu_cmd_transfer_from_host_3d
- (vgdev, vfpriv->ctx_id, offset, args->level,
- &args->box, objs, fence);
+ (vgdev, vfpriv->ctx_id, offset, args->level, args->stride,
+ args->layer_stride, &args->box, objs, fence);
dma_fence_put(&fence->f);
virtio_gpu_notify(vgdev);
return 0;
@@ -354,6 +383,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_to_host *args = data;
+ struct virtio_gpu_object *bo;
struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
int ret;
@@ -363,6 +393,12 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
if (objs == NULL)
return -ENOENT;
+ bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ if (bo->guest_blob && !bo->host3d_blob) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
if (!vgdev->has_virgl_3d) {
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, offset,
@@ -370,6 +406,12 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
objs, NULL);
} else {
virtio_gpu_create_context(dev, file);
+
+ if (!bo->host3d_blob && (args->stride || args->layer_stride)) {
+ ret = -EINVAL;
+ goto err_put_free;
+ }
+
ret = virtio_gpu_array_lock_resv(objs);
if (ret != 0)
goto err_put_free;
@@ -381,8 +423,9 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
virtio_gpu_cmd_transfer_to_host_3d
(vgdev,
- vfpriv ? vfpriv->ctx_id : 0, offset,
- args->level, &args->box, objs, fence);
+ vfpriv ? vfpriv->ctx_id : 0, offset, args->level,
+ args->stride, args->layer_stride, &args->box, objs,
+ fence);
dma_fence_put(&fence->f);
}
virtio_gpu_notify(vgdev);
@@ -491,6 +534,134 @@ copy_exit:
return 0;
}
+static int verify_blob(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_fpriv *vfpriv,
+ struct virtio_gpu_object_params *params,
+ struct drm_virtgpu_resource_create_blob *rc_blob,
+ bool *guest_blob, bool *host3d_blob)
+{
+ if (!vgdev->has_resource_blob)
+ return -EINVAL;
+
+ if ((rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK) ||
+ !rc_blob->blob_flags)
+ return -EINVAL;
+
+ if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
+ if (!vgdev->has_resource_assign_uuid)
+ return -EINVAL;
+ }
+
+ switch (rc_blob->blob_mem) {
+ case VIRTGPU_BLOB_MEM_GUEST:
+ *guest_blob = true;
+ break;
+ case VIRTGPU_BLOB_MEM_HOST3D_GUEST:
+ *guest_blob = true;
+ fallthrough;
+ case VIRTGPU_BLOB_MEM_HOST3D:
+ *host3d_blob = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (*host3d_blob) {
+ if (!vgdev->has_virgl_3d)
+ return -EINVAL;
+
+ /* Must be dword aligned. */
+ if (rc_blob->cmd_size % 4 != 0)
+ return -EINVAL;
+
+ params->ctx_id = vfpriv->ctx_id;
+ params->blob_id = rc_blob->blob_id;
+ } else {
+ if (rc_blob->blob_id != 0)
+ return -EINVAL;
+
+ if (rc_blob->cmd_size != 0)
+ return -EINVAL;
+ }
+
+ params->blob_mem = rc_blob->blob_mem;
+ params->size = rc_blob->size;
+ params->blob = true;
+ params->blob_flags = rc_blob->blob_flags;
+ return 0;
+}
+
+static int virtio_gpu_resource_create_blob(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ int ret = 0;
+ uint32_t handle = 0;
+ bool guest_blob = false;
+ bool host3d_blob = false;
+ struct drm_gem_object *obj;
+ struct virtio_gpu_object *bo;
+ struct virtio_gpu_object_params params = { 0 };
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct drm_virtgpu_resource_create_blob *rc_blob = data;
+
+ if (verify_blob(vgdev, vfpriv, &params, rc_blob,
+ &guest_blob, &host3d_blob))
+ return -EINVAL;
+
+ if (vgdev->has_virgl_3d)
+ virtio_gpu_create_context(dev, file);
+
+ if (rc_blob->cmd_size) {
+ void *buf;
+
+ buf = memdup_user(u64_to_user_ptr(rc_blob->cmd),
+ rc_blob->cmd_size);
+
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size,
+ vfpriv->ctx_id, NULL, NULL);
+ }
+
+ if (guest_blob)
+ ret = virtio_gpu_object_create(vgdev, &params, &bo, NULL);
+ else if (!guest_blob && host3d_blob)
+ ret = virtio_gpu_vram_create(vgdev, &params, &bo);
+ else
+ return -EINVAL;
+
+ if (ret < 0)
+ return ret;
+
+ bo->guest_blob = guest_blob;
+ bo->host3d_blob = host3d_blob;
+ bo->blob_mem = rc_blob->blob_mem;
+ bo->blob_flags = rc_blob->blob_flags;
+
+ obj = &bo->base.base;
+ if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) {
+ ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
+ if (ret) {
+ drm_gem_object_release(obj);
+ return ret;
+ }
+ }
+
+ ret = drm_gem_handle_create(file, obj, &handle);
+ if (ret) {
+ drm_gem_object_release(obj);
+ return ret;
+ }
+ drm_gem_object_put(obj);
+
+ rc_blob->res_handle = bo->hw_res_handle;
+ rc_blob->bo_handle = handle;
+
+ return 0;
+}
+
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
DRM_RENDER_ALLOW),
@@ -523,4 +694,8 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl,
DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
+ virtio_gpu_resource_create_blob,
+ DRM_RENDER_ALLOW),
};
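For context, the intended userspace use of the new ioctl (a hedged sketch; the struct and flag names match the UAPI referenced above, EINTR retry and error detail trimmed):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>

/* Create a guest-backed, CPU-mappable blob resource of `size` bytes. */
static int create_guest_blob(int drm_fd, uint64_t size, uint32_t *bo_handle)
{
	struct drm_virtgpu_resource_create_blob rc = {
		.blob_mem   = VIRTGPU_BLOB_MEM_GUEST,
		.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
		.size       = size,
		/* blob_id and cmd/cmd_size stay 0: verify_blob() above
		 * rejects them for non-HOST3D memory types.
		 */
	};

	if (ioctl(drm_fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &rc))
		return -1;

	*bo_handle = rc.bo_handle;
	return 0;
}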
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index eed57a931309..b4ec479c32cd 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -121,6 +121,7 @@ int virtio_gpu_init(struct drm_device *dev)
spin_lock_init(&vgdev->display_info_lock);
spin_lock_init(&vgdev->resource_export_lock);
+ spin_lock_init(&vgdev->host_visible_lock);
ida_init(&vgdev->ctx_id_ida);
ida_init(&vgdev->resource_ida);
init_waitqueue_head(&vgdev->resp_wq);
@@ -152,10 +153,33 @@ int virtio_gpu_init(struct drm_device *dev)
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) {
vgdev->has_resource_assign_uuid = true;
}
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) {
+ vgdev->has_resource_blob = true;
+ }
+ if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region,
+ VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) {
+ if (!devm_request_mem_region(&vgdev->vdev->dev,
+ vgdev->host_visible_region.addr,
+ vgdev->host_visible_region.len,
+ dev_name(&vgdev->vdev->dev))) {
+ DRM_ERROR("Could not reserve host visible region\n");
+ goto err_vqs;
+ }
+
+ DRM_INFO("Host memory window: 0x%lx +0x%lx\n",
+ (unsigned long)vgdev->host_visible_region.addr,
+ (unsigned long)vgdev->host_visible_region.len);
+ vgdev->has_host_visible = true;
+ drm_mm_init(&vgdev->host_visible_mm,
+ (unsigned long)vgdev->host_visible_region.addr,
+ (unsigned long)vgdev->host_visible_region.len);
+ }
- DRM_INFO("features: %cvirgl %cedid\n",
- vgdev->has_virgl_3d ? '+' : '-',
- vgdev->has_edid ? '+' : '-');
+ DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible\n",
+ vgdev->has_virgl_3d ? '+' : '-',
+ vgdev->has_edid ? '+' : '-',
+ vgdev->has_resource_blob ? '+' : '-',
+ vgdev->has_host_visible ? '+' : '-');
ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
if (ret) {
@@ -242,6 +266,10 @@ void virtio_gpu_release(struct drm_device *dev)
virtio_gpu_modeset_fini(vgdev);
virtio_gpu_free_vbufs(vgdev);
virtio_gpu_cleanup_cap_cache(vgdev);
+
+ if (vgdev->has_host_visible)
+ drm_mm_takedown(&vgdev->host_visible_mm);
+
kfree(vgdev->capsets);
kfree(vgdev);
}
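Userspace discovers these capabilities through the getparam cases added in virtgpu_ioctl.c above. The value field carries a user pointer that the kernel writes the result through (hedged: that copy_to_user() is in the pre-existing handler, outside this hunk):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/virtgpu_drm.h>

static int virtgpu_getparam(int drm_fd, uint64_t param, int *value)
{
	struct drm_virtgpu_getparam gp = {
		.param = param,
		.value = (uint64_t)(uintptr_t)value, /* out-pointer, not a value */
	};

	return ioctl(drm_fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp);
}

/* Usage:
 *	int has_blob = 0;
 *	virtgpu_getparam(drm_fd, VIRTGPU_PARAM_RESOURCE_BLOB, &has_blob);
 */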
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 00d6b95e259d..2d3aa7baffe4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -31,8 +31,7 @@
static int virtio_gpu_virglrenderer_workaround = 1;
module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400);
-static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
- uint32_t *resid)
+int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid)
{
if (virtio_gpu_virglrenderer_workaround) {
/*
@@ -84,6 +83,18 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
}
drm_gem_shmem_free_object(&bo->base.base);
+ } else if (virtio_gpu_is_vram(bo)) {
+ struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+
+ spin_lock(&vgdev->host_visible_lock);
+ if (drm_mm_node_allocated(&vram->vram_node))
+ drm_mm_remove_node(&vram->vram_node);
+
+ spin_unlock(&vgdev->host_visible_lock);
+
+ drm_gem_free_mmap_offset(&vram->base.base.base);
+ drm_gem_object_release(&vram->base.base.base);
+ kfree(vram);
}
}
@@ -107,6 +118,7 @@ static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
.close = virtio_gpu_gem_object_close,
.print_info = drm_gem_shmem_print_info,
+ .export = virtgpu_gem_prime_export,
.pin = drm_gem_shmem_pin,
.unpin = drm_gem_shmem_unpin,
.get_sg_table = drm_gem_shmem_get_sg_table,
@@ -234,21 +246,24 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
goto err_put_objs;
}
- if (params->virgl) {
- virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
- objs, fence);
- } else {
- virtio_gpu_cmd_create_resource(vgdev, bo, params,
- objs, fence);
- }
-
ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
if (ret != 0) {
virtio_gpu_free_object(&shmem_obj->base);
return ret;
}
- virtio_gpu_object_attach(vgdev, bo, ents, nents);
+ if (params->blob) {
+ virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
+ ents, nents);
+ } else if (params->virgl) {
+ virtio_gpu_cmd_resource_create_3d(vgdev, bo, params,
+ objs, fence);
+ virtio_gpu_object_attach(vgdev, bo, ents, nents);
+ } else {
+ virtio_gpu_cmd_create_resource(vgdev, bo, params,
+ objs, fence);
+ virtio_gpu_object_attach(vgdev, bo, ents, nents);
+ }
*bo_ptr = bo;
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 6a311cd93440..42ac08ed1442 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -174,12 +174,23 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
plane->state->src_h >> 16,
plane->state->src_x >> 16,
plane->state->src_y >> 16);
- virtio_gpu_cmd_set_scanout(vgdev, output->index,
- bo->hw_res_handle,
- plane->state->src_w >> 16,
- plane->state->src_h >> 16,
- plane->state->src_x >> 16,
- plane->state->src_y >> 16);
+
+ if (bo->host3d_blob || bo->guest_blob) {
+ virtio_gpu_cmd_set_scanout_blob
+ (vgdev, output->index, bo,
+ plane->state->fb,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
+ } else {
+ virtio_gpu_cmd_set_scanout(vgdev, output->index,
+ bo->hw_res_handle,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
+ }
}
virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index acd14ef73d56..1ef1e2f22633 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -34,8 +34,8 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
- wait_event(vgdev->resp_wq, bo->uuid_state != UUID_INITIALIZING);
- if (bo->uuid_state != UUID_INITIALIZED)
+ wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
+ if (bo->uuid_state != STATE_OK)
return -ENODEV;
uuid_copy(uuid, &bo->uuid);
@@ -59,6 +59,24 @@ const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
.get_uuid = virtgpu_virtio_get_uuid,
};
+int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo)
+{
+ int ret;
+ struct virtio_gpu_object_array *objs;
+
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs)
+ return -ENOMEM;
+
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+ ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
int flags)
{
@@ -66,22 +84,20 @@ struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj,
struct drm_device *dev = obj->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
- struct virtio_gpu_object_array *objs;
int ret = 0;
+ bool blob = bo->host3d_blob || bo->guest_blob;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- if (vgdev->has_resource_assign_uuid) {
- objs = virtio_gpu_array_alloc(1);
- if (!objs)
- return ERR_PTR(-ENOMEM);
- virtio_gpu_array_add_obj(objs, &bo->base.base);
-
- ret = virtio_gpu_cmd_resource_assign_uuid(vgdev, objs);
- if (ret)
- return ERR_PTR(ret);
- virtio_gpu_notify(vgdev);
- } else {
- bo->uuid_state = UUID_INITIALIZATION_FAILED;
+ if (!blob) {
+ if (vgdev->has_resource_assign_uuid) {
+ ret = virtio_gpu_resource_assign_uuid(vgdev, bo);
+ if (ret)
+ return ERR_PTR(ret);
+
+ virtio_gpu_notify(vgdev);
+ } else {
+ bo->uuid_state = STATE_ERR;
+ }
}
exp_info.ops = &virtgpu_dmabuf_ops.ops;
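Because cross-device blobs get their UUID assigned at creation time (VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE in virtio_gpu_resource_create_blob() above), exporting one reduces to an ordinary PRIME export; only non-blob objects still take the assign-uuid path in this function. A hedged userspace sketch:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int export_bo(int drm_fd, uint32_t bo_handle)
{
	struct drm_prime_handle ph = {
		.handle = bo_handle,
		.flags = DRM_CLOEXEC | DRM_RDWR,
	};

	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &ph))
		return -1;

	return ph.fd;	/* dma-buf fd; the host resolves its UUID */
}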
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 07945ca238e2..857f730747b6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1016,6 +1016,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
+ uint32_t stride,
+ uint32_t layer_stride,
struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
@@ -1024,11 +1026,12 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
- struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
- if (use_dma_api)
+ if (virtio_gpu_is_shmem(bo) && use_dma_api) {
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
shmem->pages, DMA_TO_DEVICE);
+ }
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
@@ -1041,6 +1044,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
convert_to_hw_box(&cmd_p->box, box);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
+ cmd_p->stride = cpu_to_le32(stride);
+ cmd_p->layer_stride = cpu_to_le32(layer_stride);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
@@ -1048,6 +1053,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
+ uint32_t stride,
+ uint32_t layer_stride,
struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
@@ -1067,6 +1074,8 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
convert_to_hw_box(&cmd_p->box, box);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
+ cmd_p->stride = cpu_to_le32(stride);
+ cmd_p->layer_stride = cpu_to_le32(layer_stride);
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
@@ -1125,14 +1134,14 @@ static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
uint32_t resp_type = le32_to_cpu(resp->hdr.type);
spin_lock(&vgdev->resource_export_lock);
- WARN_ON(obj->uuid_state != UUID_INITIALIZING);
+ WARN_ON(obj->uuid_state != STATE_INITIALIZING);
if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
- obj->uuid_state == UUID_INITIALIZING) {
- memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
- obj->uuid_state = UUID_INITIALIZED;
+ obj->uuid_state == STATE_INITIALIZING) {
+ import_uuid(&obj->uuid, resp->uuid);
+ obj->uuid_state = STATE_OK;
} else {
- obj->uuid_state = UUID_INITIALIZATION_FAILED;
+ obj->uuid_state = STATE_ERR;
}
spin_unlock(&vgdev->resource_export_lock);
@@ -1151,7 +1160,7 @@ virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
if (!resp_buf) {
spin_lock(&vgdev->resource_export_lock);
- bo->uuid_state = UUID_INITIALIZATION_FAILED;
+ bo->uuid_state = STATE_ERR;
spin_unlock(&vgdev->resource_export_lock);
virtio_gpu_array_put_free(objs);
return -ENOMEM;
@@ -1169,3 +1178,134 @@ virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
return 0;
}
+
+static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
+{
+ struct virtio_gpu_object *bo =
+ gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
+ struct virtio_gpu_resp_map_info *resp =
+ (struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
+ struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+ uint32_t resp_type = le32_to_cpu(resp->hdr.type);
+
+ spin_lock(&vgdev->host_visible_lock);
+
+ if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
+ vram->map_info = resp->map_info;
+ vram->map_state = STATE_OK;
+ } else {
+ vram->map_state = STATE_ERR;
+ }
+
+ spin_unlock(&vgdev->host_visible_lock);
+ wake_up_all(&vgdev->resp_wq);
+}
+
+int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_array *objs, uint64_t offset)
+{
+ struct virtio_gpu_resource_map_blob *cmd_p;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+ struct virtio_gpu_vbuffer *vbuf;
+ struct virtio_gpu_resp_map_info *resp_buf;
+
+ resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
+ if (!resp_buf) {
+ virtio_gpu_array_put_free(objs);
+ return -ENOMEM;
+ }
+
+ cmd_p = virtio_gpu_alloc_cmd_resp
+ (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
+ sizeof(struct virtio_gpu_resp_map_info), resp_buf);
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->offset = cpu_to_le64(offset);
+ vbuf->objs = objs;
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ return 0;
+}
+
+void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo)
+{
+ struct virtio_gpu_resource_unmap_blob *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
+void
+virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_mem_entry *ents,
+ uint32_t nents)
+{
+ struct virtio_gpu_resource_create_blob *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
+ cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
+ cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
+ cmd_p->blob_id = cpu_to_le64(params->blob_id);
+ cmd_p->size = cpu_to_le64(params->size);
+ cmd_p->nr_entries = cpu_to_le32(nents);
+
+ vbuf->data_buf = ents;
+ vbuf->data_size = sizeof(*ents) * nents;
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ bo->created = true;
+}
+
+void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
+ uint32_t scanout_id,
+ struct virtio_gpu_object *bo,
+ struct drm_framebuffer *fb,
+ uint32_t width, uint32_t height,
+ uint32_t x, uint32_t y)
+{
+ uint32_t i;
+ struct virtio_gpu_set_scanout_blob *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+ uint32_t format = virtio_gpu_translate_format(fb->format->format);
+
+ cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ memset(cmd_p, 0, sizeof(*cmd_p));
+
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->scanout_id = cpu_to_le32(scanout_id);
+
+ cmd_p->format = cpu_to_le32(format);
+ cmd_p->width = cpu_to_le32(fb->width);
+ cmd_p->height = cpu_to_le32(fb->height);
+
+ for (i = 0; i < 4; i++) {
+ cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
+ cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
+ }
+
+ cmd_p->r.width = cpu_to_le32(width);
+ cmd_p->r.height = cpu_to_le32(height);
+ cmd_p->r.x = cpu_to_le32(x);
+ cmd_p->r.y = cpu_to_le32(y);
+
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
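The new stride/layer_stride arguments exist because a blob resource carries no host-side layout metadata to derive them from, unlike classic resources created with format/width/height. What a caller would compute for packed 2D data, as a sketch:

#include <stdint.h>

/* Hedged sketch: strides for tightly packed image data, no row padding. */
static void packed_strides(uint32_t width, uint32_t height, uint32_t cpp,
			   uint32_t *stride, uint32_t *layer_stride)
{
	*stride = width * cpp;			/* bytes per row */
	*layer_stride = *stride * height;	/* bytes per 2D slice */
}

/* For classic (non-blob) resources userspace passes 0 for both; the
 * ioctl handlers above reject non-zero strides in that case.
 */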
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
new file mode 100644
index 000000000000..23c21bc4d01e
--- /dev/null
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "virtgpu_drv.h"
+
+static void virtio_gpu_vram_free(struct drm_gem_object *obj)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+ struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+ bool unmap;
+
+ if (bo->created) {
+ spin_lock(&vgdev->host_visible_lock);
+ unmap = drm_mm_node_allocated(&vram->vram_node);
+ spin_unlock(&vgdev->host_visible_lock);
+
+ if (unmap)
+ virtio_gpu_cmd_unmap(vgdev, bo);
+
+ virtio_gpu_cmd_unref_resource(vgdev, bo);
+ virtio_gpu_notify(vgdev);
+ return;
+ }
+}
+
+static const struct vm_operations_struct virtio_gpu_vram_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ int ret;
+ struct virtio_gpu_device *vgdev = obj->dev->dev_private;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+ unsigned long vm_size = vma->vm_end - vma->vm_start;
+
+ if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE))
+ return -EINVAL;
+
+ wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);
+ if (vram->map_state != STATE_OK)
+ return -EINVAL;
+
+ vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+ vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ vma->vm_ops = &virtio_gpu_vram_vm_ops;
+
+ if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED)
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ /* Partial mappings of GEM buffers don't happen much in practice. */
+ if (vm_size != vram->vram_node.size)
+ return -EINVAL;
+
+ ret = io_remap_pfn_range(vma, vma->vm_start,
+ vram->vram_node.start >> PAGE_SHIFT,
+ vm_size, vma->vm_page_prot);
+ return ret;
+}
+
+static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
+ .open = virtio_gpu_gem_object_open,
+ .close = virtio_gpu_gem_object_close,
+ .free = virtio_gpu_vram_free,
+ .mmap = virtio_gpu_vram_mmap,
+};
+
+bool virtio_gpu_is_vram(struct virtio_gpu_object *bo)
+{
+ return bo->base.base.funcs == &virtio_gpu_vram_funcs;
+}
+
+static int virtio_gpu_vram_map(struct virtio_gpu_object *bo)
+{
+ int ret;
+ uint64_t offset;
+ struct virtio_gpu_object_array *objs;
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+
+ if (!vgdev->has_host_visible)
+ return -EINVAL;
+
+ spin_lock(&vgdev->host_visible_lock);
+ ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node,
+ bo->base.base.size);
+ spin_unlock(&vgdev->host_visible_lock);
+
+ if (ret)
+ return ret;
+
+ objs = virtio_gpu_array_alloc(1);
+ if (!objs) {
+ ret = -ENOMEM;
+ goto err_remove_node;
+ }
+
+ virtio_gpu_array_add_obj(objs, &bo->base.base);
+ /* TODO: Add an error checking helper function in drm_mm.h */
+ offset = vram->vram_node.start - vgdev->host_visible_region.addr;
+
+ ret = virtio_gpu_cmd_map(vgdev, objs, offset);
+ if (ret) {
+ virtio_gpu_array_put_free(objs);
+ goto err_remove_node;
+ }
+
+ return 0;
+
+err_remove_node:
+ spin_lock(&vgdev->host_visible_lock);
+ drm_mm_remove_node(&vram->vram_node);
+ spin_unlock(&vgdev->host_visible_lock);
+ return ret;
+}
+
+int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object **bo_ptr)
+{
+ struct drm_gem_object *obj;
+ struct virtio_gpu_object_vram *vram;
+ int ret;
+
+ vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return -ENOMEM;
+
+ obj = &vram->base.base.base;
+ obj->funcs = &virtio_gpu_vram_funcs;
+ drm_gem_private_object_init(vgdev->ddev, obj, params->size);
+
+ /* Create fake offset */
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret) {
+ kfree(vram);
+ return ret;
+ }
+
+ ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle);
+ if (ret) {
+ kfree(vram);
+ return ret;
+ }
+
+ virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL,
+ 0);
+ if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) {
+ ret = virtio_gpu_vram_map(&vram->base);
+ if (ret) {
+ virtio_gpu_vram_free(obj);
+ return ret;
+ }
+ }
+
+ *bo_ptr = &vram->base;
+ return 0;
+}
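End to end, a HOST3D blob created with VIRTGPU_BLOB_FLAG_USE_MAPPABLE is mapped by the host (virtio_gpu_vram_map() above issues VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB) and then mmap()ed through the BO's fake offset, which lands in virtio_gpu_vram_mmap(). From userspace this looks like the following hedged sketch:

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/virtgpu_drm.h>

static void *map_blob_bo(int drm_fd, uint32_t bo_handle, size_t size)
{
	struct drm_virtgpu_map req = { .handle = bo_handle };

	/* Returns the fake mmap offset set up in virtio_gpu_vram_create(). */
	if (ioctl(drm_fd, DRM_IOCTL_VIRTGPU_MAP, &req))
		return MAP_FAILED;

	/* size must cover the whole BO: partial mappings fail with
	 * -EINVAL in virtio_gpu_vram_mmap() above.
	 */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, req.offset);
}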
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
index 333d3cead0e3..72f779cbfedd 100644
--- a/drivers/gpu/drm/vkms/Makefile
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -4,7 +4,6 @@ vkms-y := \
vkms_plane.o \
vkms_output.o \
vkms_crtc.o \
- vkms_gem.o \
vkms_composer.o \
vkms_writeback.o
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index 33c031f27c2c..66c6842d70db 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -5,6 +5,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_vblank.h>
#include "vkms_drv.h"
@@ -129,15 +130,15 @@ static void compose_cursor(struct vkms_composer *cursor_composer,
void *vaddr_out)
{
struct drm_gem_object *cursor_obj;
- struct vkms_gem_object *cursor_vkms_obj;
+ struct drm_gem_shmem_object *cursor_shmem_obj;
cursor_obj = drm_gem_fb_get_obj(&cursor_composer->fb, 0);
- cursor_vkms_obj = drm_gem_to_vkms_gem(cursor_obj);
+ cursor_shmem_obj = to_drm_gem_shmem_obj(cursor_obj);
- if (WARN_ON(!cursor_vkms_obj->vaddr))
+ if (WARN_ON(!cursor_shmem_obj->vaddr))
return;
- blend(vaddr_out, cursor_vkms_obj->vaddr,
+ blend(vaddr_out, cursor_shmem_obj->vaddr,
primary_composer, cursor_composer);
}
@@ -147,20 +148,20 @@ static int compose_planes(void **vaddr_out,
{
struct drm_framebuffer *fb = &primary_composer->fb;
struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
- struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj);
+ struct drm_gem_shmem_object *shmem_obj = to_drm_gem_shmem_obj(gem_obj);
if (!*vaddr_out) {
- *vaddr_out = kzalloc(vkms_obj->gem.size, GFP_KERNEL);
+ *vaddr_out = kzalloc(shmem_obj->base.size, GFP_KERNEL);
if (!*vaddr_out) {
DRM_ERROR("Cannot allocate memory for output frame.");
return -ENOMEM;
}
}
- if (WARN_ON(!vkms_obj->vaddr))
+ if (WARN_ON(!shmem_obj->vaddr))
return -EINVAL;
- memcpy(*vaddr_out, vkms_obj->vaddr, vkms_obj->gem.size);
+ memcpy(*vaddr_out, shmem_obj->vaddr, shmem_obj->base.size);
if (cursor_composer)
compose_cursor(cursor_composer, primary_composer, *vaddr_out);
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 09c012d54d58..e43e4e1b268a 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -214,13 +214,13 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
}
static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
drm_crtc_vblank_on(crtc);
}
static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
drm_crtc_vblank_off(crtc);
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index cb0b6230c22c..25faba5aac08 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -23,6 +23,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_vblank.h>
#include "vkms_drv.h"
@@ -39,23 +40,7 @@ bool enable_cursor = true;
module_param_named(enable_cursor, enable_cursor, bool, 0444);
MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");
-static const struct file_operations vkms_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = drm_gem_mmap,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .poll = drm_poll,
- .read = drm_read,
- .llseek = no_llseek,
- .release = drm_release,
-};
-
-static const struct vm_operations_struct vkms_gem_vm_ops = {
- .fault = vkms_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
+DEFINE_DRM_GEM_FOPS(vkms_driver_fops);
static void vkms_release(struct drm_device *dev)
{
@@ -97,11 +82,8 @@ static struct drm_driver vkms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
.release = vkms_release,
.fops = &vkms_driver_fops,
- .dumb_create = vkms_dumb_create,
- .gem_vm_ops = &vkms_gem_vm_ops,
- .gem_free_object_unlocked = vkms_gem_free_object,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import_sg_table = vkms_prime_import_sg_table,
+ .gem_create_object = drm_gem_shmem_create_object_cached,
+ DRM_GEM_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -132,7 +114,7 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev)
dev->mode_config.max_height = YRES_MAX;
dev->mode_config.cursor_width = 512;
dev->mode_config.cursor_height = 512;
- dev->mode_config.preferred_depth = 24;
+ dev->mode_config.preferred_depth = 32;
dev->mode_config.helper_private = &vkms_mode_config_helpers;
return vkms_output_init(vkmsdev, 0);
@@ -184,6 +166,8 @@ static int __init vkms_init(void)
if (ret)
goto out_devres;
+ drm_fbdev_generic_setup(&vkms_device->drm, 0);
+
return 0;
out_devres:
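DEFINE_DRM_GEM_FOPS() and DRM_GEM_SHMEM_DRIVER_OPS replace the hand-rolled fops, vm_ops, dumb_create and prime-import plumbing deleted above, with drm_gem_shmem_create_object_cached() keeping the cached mappings the old vkms GEM code used. At this point in the tree the shmem macro expands to roughly the following (paraphrased from drm_gem_shmem_helper.h; treat as a sketch, not the authoritative definition):

#define DRM_GEM_SHMEM_DRIVER_OPS \
	.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd, \
	.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle, \
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
	.gem_prime_mmap		   = drm_gem_prime_mmap, \
	.dumb_create		   = drm_gem_shmem_dumb_create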
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 380a8f27e156..5ed91ff08cb3 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -88,23 +88,12 @@ struct vkms_device {
struct vkms_output output;
};
-struct vkms_gem_object {
- struct drm_gem_object gem;
- struct mutex pages_lock; /* Page lock used in page fault handler */
- struct page **pages;
- unsigned int vmap_count;
- void *vaddr;
-};
-
#define drm_crtc_to_vkms_output(target) \
container_of(target, struct vkms_output, crtc)
#define drm_device_to_vkms_device(target) \
container_of(target, struct vkms_device, drm)
-#define drm_gem_to_vkms_gem(target)\
- container_of(target, struct vkms_gem_object, gem)
-
#define to_vkms_crtc_state(target)\
container_of(target, struct vkms_crtc_state, base)
@@ -120,24 +109,6 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index);
struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
enum drm_plane_type type, int index);
-/* Gem stuff */
-vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
-
-int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args);
-
-void vkms_gem_free_object(struct drm_gem_object *obj);
-
-int vkms_gem_vmap(struct drm_gem_object *obj);
-
-void vkms_gem_vunmap(struct drm_gem_object *obj);
-
-/* Prime */
-struct drm_gem_object *
-vkms_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg);
-
/* CRC Support */
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
size_t *count);
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
deleted file mode 100644
index a017fc59905e..000000000000
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ /dev/null
@@ -1,248 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-
-#include <linux/dma-buf.h>
-#include <linux/shmem_fs.h>
-#include <linux/vmalloc.h>
-#include <drm/drm_prime.h>
-
-#include "vkms_drv.h"
-
-static struct vkms_gem_object *__vkms_gem_create(struct drm_device *dev,
- u64 size)
-{
- struct vkms_gem_object *obj;
- int ret;
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj)
- return ERR_PTR(-ENOMEM);
-
- size = roundup(size, PAGE_SIZE);
- ret = drm_gem_object_init(dev, &obj->gem, size);
- if (ret) {
- kfree(obj);
- return ERR_PTR(ret);
- }
-
- mutex_init(&obj->pages_lock);
-
- return obj;
-}
-
-void vkms_gem_free_object(struct drm_gem_object *obj)
-{
- struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
- gem);
-
- WARN_ON(gem->pages);
- WARN_ON(gem->vaddr);
-
- mutex_destroy(&gem->pages_lock);
- drm_gem_object_release(obj);
- kfree(gem);
-}
-
-vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct vkms_gem_object *obj = vma->vm_private_data;
- unsigned long vaddr = vmf->address;
- pgoff_t page_offset;
- loff_t num_pages;
- vm_fault_t ret = VM_FAULT_SIGBUS;
-
- page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
- num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
-
- if (page_offset > num_pages)
- return VM_FAULT_SIGBUS;
-
- mutex_lock(&obj->pages_lock);
- if (obj->pages) {
- get_page(obj->pages[page_offset]);
- vmf->page = obj->pages[page_offset];
- ret = 0;
- }
- mutex_unlock(&obj->pages_lock);
- if (ret) {
- struct page *page;
- struct address_space *mapping;
-
- mapping = file_inode(obj->gem.filp)->i_mapping;
- page = shmem_read_mapping_page(mapping, page_offset);
-
- if (!IS_ERR(page)) {
- vmf->page = page;
- ret = 0;
- } else {
- switch (PTR_ERR(page)) {
- case -ENOSPC:
- case -ENOMEM:
- ret = VM_FAULT_OOM;
- break;
- case -EBUSY:
- ret = VM_FAULT_RETRY;
- break;
- case -EFAULT:
- case -EINVAL:
- ret = VM_FAULT_SIGBUS;
- break;
- default:
- WARN_ON(PTR_ERR(page));
- ret = VM_FAULT_SIGBUS;
- break;
- }
- }
- }
- return ret;
-}
-
-static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
- struct drm_file *file,
- u32 *handle,
- u64 size)
-{
- struct vkms_gem_object *obj;
- int ret;
-
- if (!file || !dev || !handle)
- return ERR_PTR(-EINVAL);
-
- obj = __vkms_gem_create(dev, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- ret = drm_gem_handle_create(file, &obj->gem, handle);
- if (ret)
- return ERR_PTR(ret);
-
- return &obj->gem;
-}
-
-int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- struct drm_gem_object *gem_obj;
- u64 pitch, size;
-
- if (!args || !dev || !file)
- return -EINVAL;
-
- pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
- size = pitch * args->height;
-
- if (!size)
- return -EINVAL;
-
- gem_obj = vkms_gem_create(dev, file, &args->handle, size);
- if (IS_ERR(gem_obj))
- return PTR_ERR(gem_obj);
-
- args->size = gem_obj->size;
- args->pitch = pitch;
-
- drm_gem_object_put(gem_obj);
-
- DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
-
- return 0;
-}
-
-static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
-{
- struct drm_gem_object *gem_obj = &vkms_obj->gem;
-
- if (!vkms_obj->pages) {
- struct page **pages = drm_gem_get_pages(gem_obj);
-
- if (IS_ERR(pages))
- return pages;
-
- if (cmpxchg(&vkms_obj->pages, NULL, pages))
- drm_gem_put_pages(gem_obj, pages, false, true);
- }
-
- return vkms_obj->pages;
-}
-
-void vkms_gem_vunmap(struct drm_gem_object *obj)
-{
- struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
-
- mutex_lock(&vkms_obj->pages_lock);
- if (vkms_obj->vmap_count < 1) {
- WARN_ON(vkms_obj->vaddr);
- WARN_ON(vkms_obj->pages);
- mutex_unlock(&vkms_obj->pages_lock);
- return;
- }
-
- vkms_obj->vmap_count--;
-
- if (vkms_obj->vmap_count == 0) {
- vunmap(vkms_obj->vaddr);
- vkms_obj->vaddr = NULL;
- drm_gem_put_pages(obj, vkms_obj->pages, false, true);
- vkms_obj->pages = NULL;
- }
-
- mutex_unlock(&vkms_obj->pages_lock);
-}
-
-int vkms_gem_vmap(struct drm_gem_object *obj)
-{
- struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
- int ret = 0;
-
- mutex_lock(&vkms_obj->pages_lock);
-
- if (!vkms_obj->vaddr) {
- unsigned int n_pages = obj->size >> PAGE_SHIFT;
- struct page **pages = _get_pages(vkms_obj);
-
- if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto out;
- }
-
- vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
- if (!vkms_obj->vaddr)
- goto err_vmap;
- }
-
- vkms_obj->vmap_count++;
- goto out;
-
-err_vmap:
- ret = -ENOMEM;
- drm_gem_put_pages(obj, vkms_obj->pages, false, true);
- vkms_obj->pages = NULL;
-out:
- mutex_unlock(&vkms_obj->pages_lock);
- return ret;
-}
-
-struct drm_gem_object *
-vkms_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach,
- struct sg_table *sg)
-{
- struct vkms_gem_object *obj;
- int npages;
-
- obj = __vkms_gem_create(dev, attach->dmabuf->size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
- DRM_DEBUG_PRIME("Importing %d pages\n", npages);
-
- obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!obj->pages) {
- vkms_gem_free_object(&obj->gem);
- return ERR_PTR(-ENOMEM);
- }
-
- drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
- return &obj->gem;
-}
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 6d31265a2ab7..9890137bcb8d 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -5,6 +5,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
#include "vkms_drv.h"
@@ -145,15 +146,15 @@ static int vkms_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_gem_object *gem_obj;
- int ret;
+ void *vaddr;
if (!state->fb)
return 0;
gem_obj = drm_gem_fb_get_obj(state->fb, 0);
- ret = vkms_gem_vmap(gem_obj);
- if (ret)
- DRM_ERROR("vmap failed: %d\n", ret);
+ vaddr = drm_gem_shmem_vmap(gem_obj);
+ if (IS_ERR(vaddr))
+ DRM_ERROR("vmap failed: %li\n", PTR_ERR(vaddr));
return drm_gem_fb_prepare_fb(plane, state);
}
@@ -162,12 +163,14 @@ static void vkms_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_gem_object *gem_obj;
+ struct drm_gem_shmem_object *shmem_obj;
if (!old_state->fb)
return;
gem_obj = drm_gem_fb_get_obj(old_state->fb, 0);
- vkms_gem_vunmap(gem_obj);
+ shmem_obj = to_drm_gem_shmem_obj(drm_gem_fb_get_obj(old_state->fb, 0));
+ drm_gem_shmem_vunmap(gem_obj, shmem_obj->vaddr);
}
static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index 094fa4aa061d..26b903926872 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -6,6 +6,7 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
static const u32 vkms_wb_formats[] = {
DRM_FORMAT_XRGB8888,
@@ -63,22 +64,20 @@ static int vkms_wb_connector_get_modes(struct drm_connector *connector)
static int vkms_wb_prepare_job(struct drm_writeback_connector *wb_connector,
struct drm_writeback_job *job)
{
- struct vkms_gem_object *vkms_obj;
struct drm_gem_object *gem_obj;
- int ret;
+ void *vaddr;
if (!job->fb)
return 0;
gem_obj = drm_gem_fb_get_obj(job->fb, 0);
- ret = vkms_gem_vmap(gem_obj);
- if (ret) {
- DRM_ERROR("vmap failed: %d\n", ret);
- return ret;
+ vaddr = drm_gem_shmem_vmap(gem_obj);
+ if (IS_ERR(vaddr)) {
+ DRM_ERROR("vmap failed: %li\n", PTR_ERR(vaddr));
+ return PTR_ERR(vaddr);
}
- vkms_obj = drm_gem_to_vkms_gem(gem_obj);
- job->priv = vkms_obj->vaddr;
+ job->priv = vaddr;
return 0;
}
@@ -93,7 +92,7 @@ static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector,
return;
gem_obj = drm_gem_fb_get_obj(job->fb, 0);
- vkms_gem_vunmap(gem_obj);
+ drm_gem_shmem_vunmap(gem_obj, job->priv);
vkmsdev = drm_device_to_vkms_device(gem_obj->dev);
vkms_set_composer(&vkmsdev->output, false);
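
Both vkms call sites above converge on the same shmem mapping pattern: drm_gem_shmem_vmap() returns either a kernel virtual address or an ERR_PTR(), and drm_gem_shmem_vunmap() drops the mapping again. A minimal sketch of that pair, with my_map()/my_unmap() as illustrative names:

	#include <drm/drm_gem_shmem_helper.h>

	static void *my_map(struct drm_gem_object *obj)
	{
		void *vaddr = drm_gem_shmem_vmap(obj);

		/* ERR_PTR-style error reporting, not NULL */
		return IS_ERR(vaddr) ? NULL : vaddr;
	}

	static void my_unmap(struct drm_gem_object *obj, void *vaddr)
	{
		drm_gem_shmem_vunmap(obj, vaddr);
	}
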
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index e8d66182cd7b..f21881e087db 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -459,9 +459,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
int ret = 0;
/* Buffer objects need to be either pinned or reserved: */
- if (!(dst->mem.placement & TTM_PL_FLAG_NO_EVICT))
+ if (!(dst->pin_count))
dma_resv_assert_held(dst->base.resv);
- if (!(src->mem.placement & TTM_PL_FLAG_NO_EVICT))
+ if (!(src->pin_count))
dma_resv_assert_held(src->base.resv);
if (!ttm_tt_is_populated(dst->ttm)) {
@@ -484,8 +484,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
d.src_pages = src->ttm->pages;
d.dst_num_pages = dst->num_pages;
d.src_num_pages = src->num_pages;
- d.dst_prot = ttm_io_prot(dst->mem.placement, PAGE_KERNEL);
- d.src_prot = ttm_io_prot(src->mem.placement, PAGE_KERNEL);
+ d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
+ d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
d.diff = diff;
for (j = 0; j < h; ++j) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 813f1b148094..263d76ae43f0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -106,7 +106,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto err;
- if (buf->pin_count > 0)
+ if (buf->base.pin_count > 0)
ret = ttm_bo_mem_compat(placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
else
@@ -155,7 +155,7 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto err;
- if (buf->pin_count > 0) {
+ if (buf->base.pin_count > 0) {
ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
goto out_unreserve;
@@ -246,12 +246,12 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
bo->mem.start > 0 &&
- buf->pin_count == 0) {
+ buf->base.pin_count == 0) {
ctx.interruptible = false;
(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
}
- if (buf->pin_count > 0)
+ if (buf->base.pin_count > 0)
ret = ttm_bo_mem_compat(&placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
else
@@ -343,23 +343,13 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
dma_resv_assert_held(bo->base.resv);
- if (pin) {
- if (vbo->pin_count++ > 0)
- return;
- } else {
- WARN_ON(vbo->pin_count <= 0);
- if (--vbo->pin_count > 0)
- return;
- }
+ if (pin == !!bo->pin_count)
+ return;
pl.fpfn = 0;
pl.lpfn = 0;
pl.mem_type = bo->mem.mem_type;
pl.flags = bo->mem.placement;
- if (pin)
- pl.flags |= TTM_PL_FLAG_NO_EVICT;
- else
- pl.flags &= ~TTM_PL_FLAG_NO_EVICT;
memset(&placement, 0, sizeof(placement));
placement.num_placement = 1;
@@ -368,8 +358,12 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
ret = ttm_bo_validate(bo, &placement, &ctx);
BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
-}
+ if (pin)
+ ttm_bo_pin(bo);
+ else
+ ttm_bo_unpin(bo);
+}
/**
* vmw_bo_map_and_cache - Map a buffer object and cache the map
@@ -487,6 +481,49 @@ static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
ttm_prime_object_kfree(vmw_user_bo, prime);
}
+/**
+ * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
+ *
+ * @dev_priv: Pointer to the device private struct
+ * @size: size of the BO we need
+ * @placement: where to put it
+ * @p_bo: resulting BO
+ *
+ * Creates and pins a simple BO for in-kernel use.
+ */
+int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
+ struct ttm_placement *placement,
+ struct ttm_buffer_object **p_bo)
+{
+ unsigned int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_buffer_object *bo;
+ size_t acc_size;
+ int ret;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (unlikely(!bo))
+ return -ENOMEM;
+
+ acc_size = ttm_round_pot(sizeof(*bo));
+ acc_size += ttm_round_pot(npages * sizeof(void *));
+ acc_size += ttm_round_pot(sizeof(struct ttm_tt));
+ ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
+ ttm_bo_type_device, placement, 0,
+ &ctx, acc_size, NULL, NULL, NULL);
+ if (unlikely(ret))
+ goto error_free;
+
+ ttm_bo_pin(bo);
+ ttm_bo_unreserve(bo);
+ *p_bo = bo;
+
+ return 0;
+
+error_free:
+ kfree(bo);
+ return ret;
+}
/**
* vmw_bo_init - Initialize a vmw buffer object
@@ -496,6 +533,7 @@ static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
* @size: Buffer object size in bytes.
* @placement: Initial placement.
* @interruptible: Whether waits should be performed interruptible.
+ * @pin: Whether the BO should be created pinned at a fixed location.
* @bo_free: The buffer object destructor.
* Returns: Zero on success, negative error code on error.
*
@@ -504,9 +542,10 @@ static void vmw_user_bo_destroy(struct ttm_buffer_object *bo)
int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_buffer_object *vmw_bo,
size_t size, struct ttm_placement *placement,
- bool interruptible,
+ bool interruptible, bool pin,
void (*bo_free)(struct ttm_buffer_object *bo))
{
+ struct ttm_operation_ctx ctx = { interruptible, false };
struct ttm_bo_device *bdev = &dev_priv->bdev;
size_t acc_size;
int ret;
@@ -520,11 +559,16 @@ int vmw_bo_init(struct vmw_private *dev_priv,
vmw_bo->base.priority = 3;
vmw_bo->res_tree = RB_ROOT;
- ret = ttm_bo_init(bdev, &vmw_bo->base, size,
- ttm_bo_type_device, placement,
- 0, interruptible, acc_size,
- NULL, NULL, bo_free);
- return ret;
+ ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
+ ttm_bo_type_device, placement,
+ 0, &ctx, acc_size, NULL, NULL, bo_free);
+ if (unlikely(ret))
+ return ret;
+
+ if (pin)
+ ttm_bo_pin(&vmw_bo->base);
+ ttm_bo_unreserve(&vmw_bo->base);
+ return 0;
}
@@ -613,7 +657,7 @@ int vmw_user_bo_alloc(struct vmw_private *dev_priv,
ret = vmw_bo_init(dev_priv, &user_bo->vbo, size,
(dev_priv->has_mob) ?
&vmw_sys_placement :
- &vmw_vram_sys_placement, true,
+ &vmw_vram_sys_placement, true, false,
&vmw_user_bo_destroy);
if (unlikely(ret != 0))
return ret;
@@ -1148,9 +1192,6 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
{
struct vmw_buffer_object *vbo;
- if (mem == NULL)
- return;
-
/* Make sure @bo is embedded in a struct vmw_buffer_object? */
if (bo->destroy != vmw_bo_bo_free &&
bo->destroy != vmw_user_bo_destroy)
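
The pin flow introduced throughout this file replaces the TTM_PL_FLAG_NO_EVICT placement flag with TTM's explicit pin count: initialize the BO reserved, pin it while it is still reserved, then unreserve. Condensed into one hypothetical helper, with error handling reduced to the essentials:

	static int my_bo_init_pinned(struct ttm_bo_device *bdev,
				     struct ttm_buffer_object *bo, size_t size,
				     struct ttm_placement *placement,
				     size_t acc_size, bool pin)
	{
		struct ttm_operation_ctx ctx = { true, false };
		int ret;

		ret = ttm_bo_init_reserved(bdev, bo, size, ttm_bo_type_device,
					   placement, 0, &ctx, acc_size,
					   NULL, NULL, NULL);
		if (ret)
			return ret;	/* init destroys the BO on failure */

		if (pin)
			ttm_bo_pin(bo);	/* BO is still reserved here */
		ttm_bo_unreserve(bo);
		return 0;
	}
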
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 3b41cf63110a..9a9fe10d829b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -1245,9 +1245,9 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
!dev_priv->has_mob)
return -ENOMEM;
- ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
- &vmw_mob_ne_placement, 0, false,
- &man->cmd_space);
+ ret = vmw_bo_create_kernel(dev_priv, size,
+ &vmw_mob_placement,
+ &man->cmd_space);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 65e8e7a97724..984d8884357d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -410,8 +410,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
if (!buf)
return -ENOMEM;
- ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_ne_placement,
- true, vmw_bo_bo_free);
+ ret = vmw_bo_init(dev_priv, buf, new_size, &vmw_mob_placement,
+ true, true, vmw_bo_bo_free);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 31e3e5c9f362..4860370740e0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -372,7 +372,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
return -ENOMEM;
ret = vmw_bo_init(dev_priv, vbo, PAGE_SIZE,
- &vmw_sys_ne_placement, false,
+ &vmw_sys_placement, false, true,
&vmw_bo_bo_free);
if (unlikely(ret != 0))
return ret;
@@ -468,7 +468,10 @@ out_no_query_bo:
if (dev_priv->cman)
vmw_cmdbuf_remove_pool(dev_priv->cman);
if (dev_priv->has_mob) {
- (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
+ ttm_resource_manager_evict_all(&dev_priv->bdev, man);
vmw_otables_takedown(dev_priv);
}
if (dev_priv->cman)
@@ -501,7 +504,10 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
vmw_cmdbuf_remove_pool(dev_priv->cman);
if (dev_priv->has_mob) {
- ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+ struct ttm_resource_manager *man;
+
+ man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
+ ttm_resource_manager_evict_all(&dev_priv->bdev, man);
vmw_otables_takedown(dev_priv);
}
}
@@ -1257,7 +1263,7 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
if (ttm_resource_manager_used(man)) {
ttm_resource_manager_set_used(man, false);
spin_unlock(&dev_priv->svga_lock);
- if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
+ if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
DRM_ERROR("Failed evicting VRAM buffers.\n");
vmw_write(dev_priv, SVGA_REG_ENABLE,
SVGA_REG_ENABLE_HIDE |
@@ -1364,6 +1370,10 @@ static int vmw_pm_freeze(struct device *kdev)
struct pci_dev *pdev = to_pci_dev(kdev);
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
int ret;
/*
@@ -1384,7 +1394,7 @@ static int vmw_pm_freeze(struct device *kdev)
vmw_execbuf_release_pinned_bo(dev_priv);
vmw_resource_evict_all(dev_priv);
vmw_release_device_early(dev_priv);
- ttm_bo_swapout_all();
+ while (ttm_bo_swapout(&ctx) == 0);
if (dev_priv->enable_fb)
vmw_fifo_resource_dec(dev_priv);
if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
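
Two TTM migrations repeat through this file: eviction is now requested per resource manager via ttm_manager_type() plus ttm_resource_manager_evict_all() rather than ttm_bo_evict_mm(), and the removed ttm_bo_swapout_all() becomes a caller-driven loop around ttm_bo_swapout(). The bare pattern, as a sketch:

	static void my_evict_and_swap(struct ttm_bo_device *bdev)
	{
		struct ttm_operation_ctx ctx = {
			.interruptible = false,
			.no_wait_gpu = false
		};
		struct ttm_resource_manager *man;

		/* Evict every BO backed by one memory type. */
		man = ttm_manager_type(bdev, TTM_PL_VRAM);
		if (ttm_resource_manager_evict_all(bdev, man))
			DRM_ERROR("Failed evicting VRAM buffers.\n");

		/* Swap out system-memory BOs until none is left. */
		while (ttm_bo_swapout(&ctx) == 0)
			;
	}
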
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 1523b51a7284..b45becbb00f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -99,7 +99,6 @@ struct vmw_fpriv {
* struct vmw_buffer_object - TTM buffer object with vmwgfx additions
* @base: The TTM buffer object
* @res_tree: RB tree of resources using this buffer object as a backing MOB
- * @pin_count: pin depth
* @cpu_writers: Number of synccpu write grabs. Protected by reservation when
* increased. May be decreased without reservation.
* @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
@@ -110,7 +109,6 @@ struct vmw_fpriv {
struct vmw_buffer_object {
struct ttm_buffer_object base;
struct rb_root res_tree;
- s32 pin_count;
atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
@@ -845,10 +843,14 @@ extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
SVGAGuestPtr *ptr);
extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
+extern int vmw_bo_create_kernel(struct vmw_private *dev_priv,
+ unsigned long size,
+ struct ttm_placement *placement,
+ struct ttm_buffer_object **p_bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_buffer_object *vmw_bo,
size_t size, struct ttm_placement *placement,
- bool interruptible,
+ bool interruptible, bool pin,
void (*bo_free)(struct ttm_buffer_object *bo));
extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
struct ttm_object_file *tfile);
@@ -1005,16 +1007,12 @@ extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv,
extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
-extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
-extern struct ttm_placement vmw_vram_gmr_ne_placement;
extern struct ttm_placement vmw_sys_placement;
-extern struct ttm_placement vmw_sys_ne_placement;
extern struct ttm_placement vmw_evictable_placement;
extern struct ttm_placement vmw_srf_placement;
extern struct ttm_placement vmw_mob_placement;
-extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern const struct vmw_sg_table *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index c59806d40e15..4d60201037d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -406,7 +406,7 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
ret = vmw_bo_init(vmw_priv, vmw_bo, size,
&vmw_sys_placement,
- false,
+ false, false,
&vmw_bo_bo_free);
if (unlikely(ret != 0))
goto err_unlock; /* init frees the buffer on failure */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index 551042489036..be325a62c178 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -143,7 +143,7 @@ void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type)
ttm_resource_manager_set_used(man, false);
- ttm_resource_manager_force_list_clean(&dev_priv->bdev, man);
+ ttm_resource_manager_evict_all(&dev_priv->bdev, man);
ttm_resource_manager_cleanup(man);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index c4017c7a24db..9d1de5b5cc6a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -214,7 +214,7 @@ static void vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc)
* CRTC, it makes more sense to do those at plane update time.
*/
static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
}
@@ -224,7 +224,7 @@ static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc,
* @crtc: CRTC to be turned off
*/
static void vmw_ldu_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index d4d66532f9c9..0b76b3d17d4c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -206,7 +206,7 @@ static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo,
* @start: First page of the range within the buffer object.
* @end: Last page of the range within the buffer object + 1.
*
- * This is similar to ttm_bo_unmap_virtual_locked() except it takes a subrange.
+ * This is similar to ttm_bo_unmap_virtual() except it takes a subrange.
*/
void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
pgoff_t start, pgoff_t end)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c0f156078dda..00b535831a7a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -370,7 +370,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res,
ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
res->func->backup_placement,
- interruptible,
+ interruptible, false,
&vmw_bo_bo_free);
if (unlikely(ret != 0))
goto out_no_bo;
@@ -867,7 +867,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
mutex_lock(&dev_priv->binding_mutex);
dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
- if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
+ if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
mutex_unlock(&dev_priv->binding_mutex);
return;
}
@@ -1002,7 +1002,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
vbo = res->backup;
ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
- if (!vbo->pin_count) {
+ if (!vbo->base.pin_count) {
ret = ttm_bo_validate
(&vbo->base,
res->func->backup_placement,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 4bf0f5ec4fc2..4bdad2f2d130 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -279,7 +279,7 @@ static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc)
* This is called after a mode set has been completed.
*/
static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
}
@@ -289,7 +289,7 @@ static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc,
* @crtc: CRTC to be turned off
*/
static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct vmw_private *dev_priv;
struct vmw_screen_object_unit *sou;
@@ -451,8 +451,8 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
*/
vmw_overlay_pause_all(dev_priv);
ret = vmw_bo_init(dev_priv, vps->bo, size,
- &vmw_vram_ne_placement,
- false, &vmw_bo_bo_free);
+ &vmw_vram_placement,
+ false, true, &vmw_bo_bo_free);
vmw_overlay_resume_all(dev_priv);
if (ret) {
vps->bo = NULL; /* vmw_bo_init frees on error */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index e139fdfd1635..f328aa5839a2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -978,8 +978,8 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
if (unlikely(!buf))
return -ENOMEM;
- ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_ne_placement,
- true, vmw_bo_bo_free);
+ ret = vmw_bo_init(dev_priv, buf, size, &vmw_sys_placement,
+ true, true, vmw_bo_bo_free);
if (unlikely(ret != 0))
goto out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index cf3aafd00837..5b04ec047ef3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -408,12 +408,12 @@ static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc)
}
static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
}
static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct vmw_private *dev_priv;
struct vmw_screen_target_display_unit *stdu;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index c8427998fa35..155ca3a5c7e5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -152,7 +152,7 @@ void vmw_thp_fini(struct vmw_private *dev_priv)
ttm_resource_manager_set_used(man, false);
- ret = ttm_resource_manager_force_list_clean(&dev_priv->bdev, man);
+ ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man);
if (ret)
return;
spin_lock(&rman->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 73116ec70ba5..33e3aa5b18f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -34,56 +34,28 @@ static const struct ttm_place vram_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_VRAM,
- .flags = TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place vram_ne_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_VRAM,
- .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ .flags = 0
};
static const struct ttm_place sys_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
- .flags = TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place sys_ne_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_SYSTEM,
- .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ .flags = 0
};
static const struct ttm_place gmr_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_GMR,
- .flags = TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place gmr_ne_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ .flags = 0
};
static const struct ttm_place mob_placement_flags = {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_MOB,
- .flags = TTM_PL_FLAG_CACHED
-};
-
-static const struct ttm_place mob_ne_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_MOB,
- .flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+ .flags = 0
};
struct ttm_placement vmw_vram_placement = {
@@ -98,12 +70,12 @@ static const struct ttm_place vram_gmr_placement_flags[] = {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_VRAM,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_GMR,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}
};
@@ -112,12 +84,12 @@ static const struct ttm_place gmr_vram_placement_flags[] = {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_GMR,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_VRAM,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}
};
@@ -128,29 +100,6 @@ struct ttm_placement vmw_vram_gmr_placement = {
.busy_placement = &gmr_placement_flags
};
-static const struct ttm_place vram_gmr_ne_placement_flags[] = {
- {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_VRAM,
- .flags = TTM_PL_FLAG_CACHED |
- TTM_PL_FLAG_NO_EVICT
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = TTM_PL_FLAG_CACHED |
- TTM_PL_FLAG_NO_EVICT
- }
-};
-
-struct ttm_placement vmw_vram_gmr_ne_placement = {
- .num_placement = 2,
- .placement = vram_gmr_ne_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &gmr_ne_placement_flags
-};
-
struct ttm_placement vmw_vram_sys_placement = {
.num_placement = 1,
.placement = &vram_placement_flags,
@@ -158,13 +107,6 @@ struct ttm_placement vmw_vram_sys_placement = {
.busy_placement = &sys_placement_flags
};
-struct ttm_placement vmw_vram_ne_placement = {
- .num_placement = 1,
- .placement = &vram_ne_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &vram_ne_placement_flags
-};
-
struct ttm_placement vmw_sys_placement = {
.num_placement = 1,
.placement = &sys_placement_flags,
@@ -172,34 +114,27 @@ struct ttm_placement vmw_sys_placement = {
.busy_placement = &sys_placement_flags
};
-struct ttm_placement vmw_sys_ne_placement = {
- .num_placement = 1,
- .placement = &sys_ne_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_ne_placement_flags
-};
-
static const struct ttm_place evictable_placement_flags[] = {
{
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_VRAM,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_GMR,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_MOB,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}
};
@@ -208,17 +143,17 @@ static const struct ttm_place nonfixed_placement_flags[] = {
.fpfn = 0,
.lpfn = 0,
.mem_type = TTM_PL_SYSTEM,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_GMR,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}, {
.fpfn = 0,
.lpfn = 0,
.mem_type = VMW_PL_MOB,
- .flags = TTM_PL_FLAG_CACHED
+ .flags = 0
}
};
@@ -243,13 +178,6 @@ struct ttm_placement vmw_mob_placement = {
.busy_placement = &mob_placement_flags
};
-struct ttm_placement vmw_mob_ne_placement = {
- .num_placement = 1,
- .num_busy_placement = 1,
- .placement = &mob_ne_placement_flags,
- .busy_placement = &mob_ne_placement_flags
-};
-
struct ttm_placement vmw_nonfixed_placement = {
.num_placement = 3,
.placement = nonfixed_placement_flags,
@@ -258,7 +186,7 @@ struct ttm_placement vmw_nonfixed_placement = {
};
struct vmw_ttm_tt {
- struct ttm_dma_tt dma_ttm;
+ struct ttm_tt dma_ttm;
struct vmw_private *dev_priv;
int gmr_id;
struct vmw_mob *mob;
@@ -438,8 +366,8 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
return 0;
vsgt->mode = dev_priv->map_mode;
- vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
- vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
+ vsgt->pages = vmw_tt->dma_ttm.pages;
+ vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
vsgt->addrs = vmw_tt->dma_ttm.dma_address;
vsgt->sgt = &vmw_tt->sgt;
@@ -549,7 +477,7 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{
struct vmw_ttm_tt *vmw_tt =
- container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
return &vmw_tt->vsgt;
}
@@ -559,7 +487,7 @@ static int vmw_ttm_bind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
struct vmw_ttm_tt *vmw_be =
- container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ container_of(ttm, struct vmw_ttm_tt, dma_ttm);
int ret = 0;
if (!bo_mem)
@@ -603,7 +531,7 @@ static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
- container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ container_of(ttm, struct vmw_ttm_tt, dma_ttm);
if (!vmw_be->bound)
return;
@@ -628,13 +556,13 @@ static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_be =
- container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ container_of(ttm, struct vmw_ttm_tt, dma_ttm);
vmw_ttm_unbind(bdev, ttm);
ttm_tt_destroy_common(bdev, ttm);
vmw_ttm_unmap_dma(vmw_be);
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
- ttm_dma_tt_fini(&vmw_be->dma_ttm);
+ ttm_tt_fini(&vmw_be->dma_ttm);
else
ttm_tt_fini(ttm);
@@ -649,7 +577,7 @@ static int vmw_ttm_populate(struct ttm_bo_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct vmw_ttm_tt *vmw_tt =
- container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ container_of(ttm, struct vmw_ttm_tt, dma_ttm);
struct vmw_private *dev_priv = vmw_tt->dev_priv;
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
int ret;
@@ -678,7 +606,7 @@ static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
struct ttm_tt *ttm)
{
struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
- dma_ttm.ttm);
+ dma_ttm);
struct vmw_private *dev_priv = vmw_tt->dev_priv;
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
@@ -713,13 +641,15 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
- ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags);
+ ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags,
+ ttm_cached);
else
- ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags);
+ ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
+ ttm_cached);
if (unlikely(ret != 0))
goto out_no_init;
- return &vmw_be->dma_ttm.ttm;
+ return &vmw_be->dma_ttm;
out_no_init:
kfree(vmw_be);
return NULL;
@@ -752,6 +682,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resourc
mem->bus.offset = (mem->start << PAGE_SHIFT) +
dev_priv->vram_start;
mem->bus.is_iomem = true;
+ mem->bus.caching = ttm_cached;
break;
default:
return -EINVAL;
@@ -773,6 +704,8 @@ static void vmw_move_notify(struct ttm_buffer_object *bo,
bool evict,
struct ttm_resource *mem)
{
+ if (!mem)
+ return;
vmw_bo_move_notify(bo, mem);
vmw_query_move_notify(bo, mem);
}
@@ -789,19 +722,65 @@ static void vmw_swap_notify(struct ttm_buffer_object *bo)
(void) ttm_bo_wait(bo, false, false);
}
+static int vmw_move(struct ttm_buffer_object *bo,
+ bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem)
+{
+ struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
+ struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
+ int ret;
+
+ if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) {
+ ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
+ if (ret)
+ return ret;
+ }
+
+ vmw_move_notify(bo, evict, new_mem);
+
+ if (old_man->use_tt && new_man->use_tt) {
+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+ ttm_bo_assign_mem(bo, new_mem);
+ return 0;
+ }
+ ret = ttm_bo_wait_ctx(bo, ctx);
+ if (ret)
+ goto fail;
+
+ vmw_ttm_unbind(bo->bdev, bo->ttm);
+ ttm_resource_free(bo, &bo->mem);
+ ttm_bo_assign_mem(bo, new_mem);
+ return 0;
+ } else {
+ ret = ttm_bo_move_memcpy(bo, ctx, new_mem);
+ if (ret)
+ goto fail;
+ }
+ return 0;
+fail:
+ swap(*new_mem, bo->mem);
+ vmw_move_notify(bo, false, new_mem);
+ swap(*new_mem, bo->mem);
+ return ret;
+}
+
+static void
+vmw_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+ vmw_move_notify(bo, false, NULL);
+}
struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &vmw_ttm_populate,
.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
- .ttm_tt_bind = &vmw_ttm_bind,
- .ttm_tt_unbind = &vmw_ttm_unbind,
.ttm_tt_destroy = &vmw_ttm_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
- .move = NULL,
+ .move = vmw_move,
.verify_access = vmw_verify_access,
- .move_notify = vmw_move_notify,
+ .delete_mem_notify = vmw_delete_mem_notify,
.swap_notify = vmw_swap_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
};
@@ -817,11 +796,9 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo;
int ret;
- ret = ttm_bo_create(&dev_priv->bdev, bo_size,
- ttm_bo_type_device,
- &vmw_sys_ne_placement,
- 0, false, &bo);
-
+ ret = vmw_bo_create_kernel(dev_priv, bo_size,
+ &vmw_sys_placement,
+ &bo);
if (unlikely(ret != 0))
return ret;
@@ -830,7 +807,7 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
if (likely(ret == 0)) {
struct vmw_ttm_tt *vmw_tt =
- container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+ container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
ret = vmw_ttm_map_dma(vmw_tt);
}
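
Two themes run through this file. First, placements no longer carry caching or eviction flags, so every .flags field collapses to 0 and the *_ne_* variants disappear entirely. Second, the cache mode moves to TT creation time as an extra argument to ttm_tt_init()/ttm_dma_tt_init(). Both halves in one sketch, with hypothetical my_* names:

	static const struct ttm_place my_sys_place = {
		.fpfn = 0,
		.lpfn = 0,
		.mem_type = TTM_PL_SYSTEM,
		.flags = 0,	/* was TTM_PL_FLAG_CACHED [| TTM_PL_FLAG_NO_EVICT] */
	};

	static struct ttm_tt *my_tt_create(struct ttm_buffer_object *bo,
					   uint32_t page_flags)
	{
		struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

		if (!tt)
			return NULL;
		/* Caching is now chosen once, at init time. */
		if (ttm_tt_init(tt, bo, page_flags, ttm_cached)) {
			kfree(tt);
			return NULL;
		}
		return tt;
	}
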
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index e69bc373ae2e..f2e2bf6d1421 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -540,7 +540,7 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
if (atomic_read(&vbo->cpu_writers))
return -EBUSY;
- if (vbo->pin_count > 0)
+ if (vbo->base.pin_count > 0)
return 0;
if (validate_as_mob)
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index cc93a8c9547b..98b6d2ba088a 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -381,6 +381,23 @@ void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
fb_cookie);
}
+void xen_drm_front_gem_object_free(struct drm_gem_object *obj)
+{
+ struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
+ int idx;
+
+ if (drm_dev_enter(obj->dev, &idx)) {
+ xen_drm_front_dbuf_destroy(drm_info->front_info,
+ xen_drm_front_dbuf_to_cookie(obj));
+ drm_dev_exit(idx);
+ } else {
+ dbuf_free(&drm_info->front_info->dbuf_list,
+ xen_drm_front_dbuf_to_cookie(obj));
+ }
+
+ xen_drm_front_gem_free_object_unlocked(obj);
+}
+
static int xen_drm_drv_dumb_create(struct drm_file *filp,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
@@ -435,23 +452,6 @@ fail:
return ret;
}
-static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
-{
- struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
- int idx;
-
- if (drm_dev_enter(obj->dev, &idx)) {
- xen_drm_front_dbuf_destroy(drm_info->front_info,
- xen_drm_front_dbuf_to_cookie(obj));
- drm_dev_exit(idx);
- } else {
- dbuf_free(&drm_info->front_info->dbuf_list,
- xen_drm_front_dbuf_to_cookie(obj));
- }
-
- xen_drm_front_gem_free_object_unlocked(obj);
-}
-
static void xen_drm_drv_release(struct drm_device *dev)
{
struct xen_drm_front_drm_info *drm_info = dev->dev_private;
@@ -483,22 +483,12 @@ static const struct file_operations xen_drm_dev_fops = {
.mmap = xen_drm_front_gem_mmap,
};
-static const struct vm_operations_struct xen_drm_drv_vm_ops = {
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static struct drm_driver xen_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.release = xen_drm_drv_release,
- .gem_vm_ops = &xen_drm_drv_vm_ops,
- .gem_free_object_unlocked = xen_drm_drv_free_object_unlocked,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
- .gem_prime_get_sg_table = xen_drm_front_gem_get_sg_table,
- .gem_prime_vmap = xen_drm_front_gem_prime_vmap,
- .gem_prime_vunmap = xen_drm_front_gem_prime_vunmap,
.gem_prime_mmap = xen_drm_front_gem_prime_mmap,
.dumb_create = xen_drm_drv_dumb_create,
.fops = &xen_drm_dev_fops,
diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h
index 54486d89650e..cefafe859aba 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.h
+++ b/drivers/gpu/drm/xen/xen_drm_front.h
@@ -160,4 +160,6 @@ int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
int conn_idx, u64 fb_cookie);
+void xen_drm_front_gem_object_free(struct drm_gem_object *obj);
+
#endif /* __XEN_DRM_FRONT_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index 2f464ef2d53e..4f34ef34ba60 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -57,6 +57,19 @@ static void gem_free_pages_array(struct xen_gem_object *xen_obj)
xen_obj->pages = NULL;
}
+static const struct vm_operations_struct xen_drm_drv_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
+ .free = xen_drm_front_gem_object_free,
+ .get_sg_table = xen_drm_front_gem_get_sg_table,
+ .vmap = xen_drm_front_gem_prime_vmap,
+ .vunmap = xen_drm_front_gem_prime_vunmap,
+ .vm_ops = &xen_drm_drv_vm_ops,
+};
+
static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
size_t size)
{
@@ -67,6 +80,8 @@ static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
if (!xen_obj)
return ERR_PTR(-ENOMEM);
+ xen_obj->base.funcs = &xen_drm_front_gem_object_funcs;
+
ret = drm_gem_object_init(dev, &xen_obj->base, size);
if (ret < 0) {
kfree(xen_obj);
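
This is the GEM-object-functions conversion applied across the tree in this merge: free, get_sg_table, vmap/vunmap and vm_ops move from struct drm_driver into a per-object struct drm_gem_object_funcs assigned before drm_gem_object_init(). Stripped to its skeleton, with my_* as placeholder hooks:

	static const struct drm_gem_object_funcs my_gem_funcs = {
		.free = my_gem_free,		/* was .gem_free_object_unlocked */
		.get_sg_table = my_gem_get_sg_table,
		.vmap = my_gem_vmap,
		.vunmap = my_gem_vunmap,
		.vm_ops = &my_gem_vm_ops,	/* was .gem_vm_ops */
	};

	/* At object creation, before drm_gem_object_init(): */
	obj->funcs = &my_gem_funcs;
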
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index 98bd48f13fd1..0b3bd62e7631 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -1449,7 +1449,7 @@ static int zynqmp_disp_crtc_setup_clock(struct drm_crtc *crtc,
static void
zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
struct zynqmp_disp *disp = crtc_to_disp(crtc);
struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
@@ -1480,8 +1480,10 @@ zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc,
static void
zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+ struct drm_atomic_state *state)
{
+ struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+ crtc);
struct zynqmp_disp *disp = crtc_to_disp(crtc);
struct drm_plane_state *old_plane_state;
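
Every CRTC hunk in this merge makes the same signature change: atomic_enable/atomic_disable now receive the full struct drm_atomic_state, and a callback that still needs the old CRTC state fetches it explicitly, as zynqmp does above. The shape of a converted callback, as a sketch:

	static void my_crtc_atomic_disable(struct drm_crtc *crtc,
					   struct drm_atomic_state *state)
	{
		struct drm_crtc_state *old_crtc_state =
			drm_atomic_get_old_crtc_state(state, crtc);

		/* old_crtc_state replaces the removed argument */
		if (old_crtc_state)
			drm_dbg(crtc->dev, "disabling from mode %s\n",
				old_crtc_state->mode.name);
	}
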
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index 8e69303aad3f..f3ffc3703a0e 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -80,19 +80,7 @@ static struct drm_driver zynqmp_dpsub_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM |
DRIVER_ATOMIC,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = zynqmp_dpsub_dumb_create,
- .dumb_destroy = drm_gem_dumb_destroy,
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(zynqmp_dpsub_dumb_create),
.fops = &zynqmp_dpsub_drm_fops,
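
A dozen hand-written hooks collapse into one macro here. For a CMA driver that only needs a custom dumb_create, the initializer reduces to the following sketch (my_dumb_create and my_fops are hypothetical; the vmap/vunmap/free hooks removed above now come from the per-object GEM functions the macro's helpers install):

	static struct drm_driver my_driver = {
		.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
		/* Stands in for the prime/dumb hooks removed above. */
		DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(my_dumb_create),
		.fops = &my_fops,
	};
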
diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c
index 5259ff2825f9..d2a529eba3c9 100644
--- a/drivers/gpu/drm/zte/zx_vou.c
+++ b/drivers/gpu/drm/zte/zx_vou.c
@@ -350,7 +350,7 @@ static inline void vou_chn_set_update(struct zx_crtc *zcrtc)
}
static void zx_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
@@ -455,7 +455,7 @@ static void zx_crtc_atomic_enable(struct drm_crtc *crtc,
}
static void zx_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_state)
+ struct drm_atomic_state *state)
{
struct zx_crtc *zcrtc = to_zx_crtc(crtc);
const struct zx_crtc_bits *bits = zcrtc->bits;
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 087304b1a5d7..1401fd52f37a 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -1034,17 +1034,12 @@ static int vga_switcheroo_runtime_suspend(struct device *dev)
static int vga_switcheroo_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int ret;
mutex_lock(&vgasr_mutex);
vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON);
mutex_unlock(&vgasr_mutex);
pci_wakeup_bus(pdev->bus);
- ret = dev->bus->pm->runtime_resume(dev);
- if (ret)
- return ret;
-
- return 0;
+ return dev->bus->pm->runtime_resume(dev);
}
/**
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 2f3a5996d3fc..a7f61ba85440 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -76,9 +76,13 @@ static void *vb2_dc_cookie(void *buf_priv)
static void *vb2_dc_vaddr(void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
+ struct dma_buf_map map;
+ int ret;
- if (!buf->vaddr && buf->db_attach)
- buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
+ if (!buf->vaddr && buf->db_attach) {
+ ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
+ buf->vaddr = ret ? NULL : map.vaddr;
+ }
return buf->vaddr;
}
@@ -344,11 +348,13 @@ vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
return 0;
}
-static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
struct vb2_dc_buf *buf = dbuf->priv;
- return buf->vaddr;
+ dma_buf_map_set_vaddr(map, buf->vaddr);
+
+ return 0;
}
static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
@@ -619,6 +625,7 @@ static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
struct vb2_dc_buf *buf = mem_priv;
struct sg_table *sgt = buf->dma_sgt;
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
if (WARN_ON(!buf->db_attach)) {
pr_err("trying to unpin a not attached buffer\n");
@@ -631,7 +638,7 @@ static void vb2_dc_unmap_dmabuf(void *mem_priv)
}
if (buf->vaddr) {
- dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+ dma_buf_vunmap(buf->db_attach->dmabuf, &map);
buf->vaddr = NULL;
}
dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
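
The same dma_buf_map conversion recurs in the dma-sg, vmalloc and fastrpc hunks below: dma_buf_vmap() now returns an error code and fills in a struct dma_buf_map, and dma_buf_vunmap() takes that map back, so a driver that caches a plain vaddr rebuilds the map with DMA_BUF_MAP_INIT_VADDR(). The caller side in isolation, as a sketch with illustrative my_* names:

	#include <linux/dma-buf.h>
	#include <linux/dma-buf-map.h>

	static void *my_vmap(struct dma_buf *dbuf)
	{
		struct dma_buf_map map;

		if (dma_buf_vmap(dbuf, &map))
			return NULL;
		return map.vaddr;	/* assumes system memory, not I/O memory */
	}

	static void my_vunmap(struct dma_buf *dbuf, void *vaddr)
	{
		struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(vaddr);

		dma_buf_vunmap(dbuf, &map);
	}
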
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index 748131151c49..030e48218687 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -295,14 +295,18 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
+ struct dma_buf_map map;
+ int ret;
BUG_ON(!buf);
if (!buf->vaddr) {
- if (buf->db_attach)
- buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
- else
+ if (buf->db_attach) {
+ ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
+ buf->vaddr = ret ? NULL : map.vaddr;
+ } else {
buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
+ }
}
/* add offset in case userptr is not page-aligned */
@@ -480,11 +484,13 @@ vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
return 0;
}
-static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
+static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
struct vb2_dma_sg_buf *buf = dbuf->priv;
- return vb2_dma_sg_vaddr(buf);
+ dma_buf_map_set_vaddr(map, buf->vaddr);
+
+ return 0;
}
static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
@@ -565,6 +571,7 @@ static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
struct vb2_dma_sg_buf *buf = mem_priv;
struct sg_table *sgt = buf->dma_sgt;
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
if (WARN_ON(!buf->db_attach)) {
pr_err("trying to unpin a not attached buffer\n");
@@ -577,7 +584,7 @@ static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
}
if (buf->vaddr) {
- dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
+ dma_buf_vunmap(buf->db_attach->dmabuf, &map);
buf->vaddr = NULL;
}
dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index bf5ac63a5742..83f95258ec8c 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -314,11 +314,13 @@ static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
vb2_vmalloc_put(dbuf->priv);
}
-static void *vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf)
+static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
struct vb2_vmalloc_buf *buf = dbuf->priv;
- return buf->vaddr;
+ dma_buf_map_set_vaddr(map, buf->vaddr);
+
+ return 0;
}
static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
@@ -370,26 +372,33 @@ static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flag
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
struct vb2_vmalloc_buf *buf = mem_priv;
+ struct dma_buf_map map;
+ int ret;
- buf->vaddr = dma_buf_vmap(buf->dbuf);
+ ret = dma_buf_vmap(buf->dbuf, &map);
+ if (ret)
+ return -EFAULT;
+ buf->vaddr = map.vaddr;
- return buf->vaddr ? 0 : -EFAULT;
+ return 0;
}
static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
struct vb2_vmalloc_buf *buf = mem_priv;
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
- dma_buf_vunmap(buf->dbuf, buf->vaddr);
+ dma_buf_vunmap(buf->dbuf, &map);
buf->vaddr = NULL;
}
static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
struct vb2_vmalloc_buf *buf = mem_priv;
+ struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(buf->vaddr);
if (buf->vaddr)
- dma_buf_vunmap(buf->dbuf, buf->vaddr);
+ dma_buf_vunmap(buf->dbuf, &map);
kfree(buf);
}
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 994ab67bc2dc..70eb5ed942d0 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -586,11 +586,13 @@ static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf,
kfree(a);
}
-static void *fastrpc_vmap(struct dma_buf *dmabuf)
+static int fastrpc_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
struct fastrpc_buf *buf = dmabuf->priv;
- return buf->virt;
+ dma_buf_map_set_vaddr(map, buf->virt);
+
+ return 0;
}
static int fastrpc_mmap(struct dma_buf *dmabuf,
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 2fe690150420..6851f47613e1 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -2200,7 +2200,7 @@ static ssize_t radeon_show_edid1(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct fb_info *info = dev_get_drvdata(dev);
struct radeonfb_info *rinfo = info->par;
@@ -2212,7 +2212,7 @@ static ssize_t radeon_show_edid2(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
+ struct device *dev = kobj_to_dev(kobj);
struct fb_info *info = dev_get_drvdata(dev);
struct radeonfb_info *rinfo = info->par;
diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c
index 15a9ee7cd734..e9027172c0f5 100644
--- a/drivers/video/fbdev/cirrusfb.c
+++ b/drivers/video/fbdev/cirrusfb.c
@@ -531,7 +531,7 @@ static int cirrusfb_check_var(struct fb_var_screeninfo *var,
{
int yres;
/* memory size in pixels */
- unsigned pixels = info->screen_size * 8 / var->bits_per_pixel;
+ unsigned int pixels;
struct cirrusfb_info *cinfo = info->par;
switch (var->bits_per_pixel) {
@@ -573,6 +573,7 @@ static int cirrusfb_check_var(struct fb_var_screeninfo *var,
return -EINVAL;
}
+ pixels = info->screen_size * 8 / var->bits_per_pixel;
if (var->xres_virtual < var->xres)
var->xres_virtual = var->xres;
/* use highest possible virtual resolution */
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 8268bbee8cae..9e5c78e00995 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1215,36 +1215,30 @@ struct fb_cmap32 {
static int fb_getput_cmap(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
- struct fb_cmap_user __user *cmap;
- struct fb_cmap32 __user *cmap32;
- __u32 data;
- int err;
-
- cmap = compat_alloc_user_space(sizeof(*cmap));
- cmap32 = compat_ptr(arg);
+ struct fb_cmap32 cmap32;
+ struct fb_cmap cmap_from;
+ struct fb_cmap_user cmap;
- if (copy_in_user(&cmap->start, &cmap32->start, 2 * sizeof(__u32)))
+ if (copy_from_user(&cmap32, compat_ptr(arg), sizeof(cmap32)))
return -EFAULT;
- if (get_user(data, &cmap32->red) ||
- put_user(compat_ptr(data), &cmap->red) ||
- get_user(data, &cmap32->green) ||
- put_user(compat_ptr(data), &cmap->green) ||
- get_user(data, &cmap32->blue) ||
- put_user(compat_ptr(data), &cmap->blue) ||
- get_user(data, &cmap32->transp) ||
- put_user(compat_ptr(data), &cmap->transp))
- return -EFAULT;
+ cmap = (struct fb_cmap_user) {
+ .start = cmap32.start,
+ .len = cmap32.len,
+ .red = compat_ptr(cmap32.red),
+ .green = compat_ptr(cmap32.green),
+ .blue = compat_ptr(cmap32.blue),
+ .transp = compat_ptr(cmap32.transp),
+ };
- err = do_fb_ioctl(info, cmd, (unsigned long) cmap);
+ if (cmd == FBIOPUTCMAP)
+ return fb_set_user_cmap(&cmap, info);
- if (!err) {
- if (copy_in_user(&cmap32->start,
- &cmap->start,
- 2 * sizeof(__u32)))
- err = -EFAULT;
- }
- return err;
+ lock_fb_info(info);
+ cmap_from = info->cmap;
+ unlock_fb_info(info);
+
+ return fb_cmap_to_user(&cmap_from, &cmap);
}
static int do_fscreeninfo_to_user(struct fb_fix_screeninfo *fix,
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index a547c21c7e92..e332017c6af6 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -1425,7 +1425,6 @@ static int fsl_diu_open(struct fb_info *info, int user)
static int fsl_diu_release(struct fb_info *info, int user)
{
struct mfb_info *mfbi = info->par;
- int res = 0;
spin_lock(&diu_lock);
mfbi->count--;
@@ -1447,7 +1446,7 @@ static int fsl_diu_release(struct fb_info *info, int user)
}
spin_unlock(&diu_lock);
- return res;
+ return 0;
}
static const struct fb_ops fsl_diu_ops = {
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 570439b32655..a3853421b263 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -1970,9 +1970,7 @@ int matroxfb_register_driver(struct matroxfb_driver* drv) {
struct matrox_fb_info* minfo;
list_add(&drv->node, &matroxfb_driver_list);
- for (minfo = matroxfb_l(matroxfb_list.next);
- minfo != matroxfb_l(&matroxfb_list);
- minfo = matroxfb_l(minfo->next_fb.next)) {
+ list_for_each_entry(minfo, &matroxfb_list, next_fb) {
void* p;
if (minfo->drivers_count == MATROXFB_MAX_FB_DRIVERS)
@@ -1990,9 +1988,7 @@ void matroxfb_unregister_driver(struct matroxfb_driver* drv) {
struct matrox_fb_info* minfo;
list_del(&drv->node);
- for (minfo = matroxfb_l(matroxfb_list.next);
- minfo != matroxfb_l(&matroxfb_list);
- minfo = matroxfb_l(minfo->next_fb.next)) {
+ list_for_each_entry(minfo, &matroxfb_list, next_fb) {
int i;
for (i = 0; i < minfo->drivers_count; ) {
diff --git a/drivers/video/fbdev/mx3fb.c b/drivers/video/fbdev/mx3fb.c
index 603731a5a72e..894617ddabcb 100644
--- a/drivers/video/fbdev/mx3fb.c
+++ b/drivers/video/fbdev/mx3fb.c
@@ -1428,7 +1428,6 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
struct device *dev = mx3fb->dev;
struct mx3fb_platform_data *mx3fb_pdata = dev_get_platdata(dev);
const char *name = mx3fb_pdata->name;
- unsigned int irq;
struct fb_info *fbi;
struct mx3fb_info *mx3fbi;
const struct fb_videomode *mode;
@@ -1441,7 +1440,6 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
}
ichan->client = mx3fb;
- irq = ichan->eof_irq;
if (ichan->dma_chan.chan_id != IDMAC_SDC_0)
return -EINVAL;
diff --git a/drivers/video/fbdev/nvidia/nv_of.c b/drivers/video/fbdev/nvidia/nv_of.c
index 5f3e5179c25a..d20b8779bb32 100644
--- a/drivers/video/fbdev/nvidia/nv_of.c
+++ b/drivers/video/fbdev/nvidia/nv_of.c
@@ -42,8 +42,7 @@ int nvidia_probe_of_connector(struct fb_info *info, int conn, u8 **out_edid)
const char *pname;
int len;
- for (dp = NULL;
- (dp = of_get_next_child(parent, dp)) != NULL;) {
+ for_each_child_of_node(parent, dp) {
pname = of_get_property(dp, "name", NULL);
if (!pname)
continue;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
index 63bd13ba429e..a9fd732f8103 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
@@ -47,18 +47,13 @@ static int tvc_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
dev_dbg(ddata->dev, "connect\n");
if (omapdss_device_is_connected(dssdev))
return 0;
- r = in->ops.atv->connect(in, dssdev);
- if (r)
- return r;
-
- return 0;
+ return in->ops.atv->connect(in, dssdev);
}
static void tvc_disconnect(struct omap_dss_device *dssdev)
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
index b4a1aefff766..2fa436475b40 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
@@ -51,16 +51,11 @@ static int dvic_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
if (omapdss_device_is_connected(dssdev))
return 0;
- r = in->ops.dvi->connect(in, dssdev);
- if (r)
- return r;
-
- return 0;
+ return in->ops.dvi->connect(in, dssdev);
}
static void dvic_disconnect(struct omap_dss_device *dssdev)
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
index 49551afbdbe0..670b9c6eb5a9 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
@@ -50,18 +50,13 @@ static int hdmic_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
dev_dbg(ddata->dev, "connect\n");
if (omapdss_device_is_connected(dssdev))
return 0;
- r = in->ops.hdmi->connect(in, dssdev);
- if (r)
- return r;
-
- return 0;
+ return in->ops.hdmi->connect(in, dssdev);
}
static void hdmic_disconnect(struct omap_dss_device *dssdev)
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
index 37c9f5bfaefe..ff3d1e8e1e7b 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
@@ -37,16 +37,11 @@ static int panel_dpi_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
if (omapdss_device_is_connected(dssdev))
return 0;
- r = in->ops.dpi->connect(in, dssdev);
- if (r)
- return r;
-
- return 0;
+ return in->ops.dpi->connect(in, dssdev);
}
static void panel_dpi_disconnect(struct omap_dss_device *dssdev)
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index a3912fc8031f..602324c5c9f9 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -59,16 +59,11 @@ static int sharp_ls_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
if (omapdss_device_is_connected(dssdev))
return 0;
- r = in->ops.dpi->connect(in, dssdev);
- if (r)
- return r;
-
- return 0;
+ return in->ops.dpi->connect(in, dssdev);
}
static void sharp_ls_disconnect(struct omap_dss_device *dssdev)
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
index 1293515e4b16..8d8b5ff7d43c 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
@@ -506,16 +506,11 @@ static int acx565akm_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
if (omapdss_device_is_connected(dssdev))
return 0;
- r = in->ops.sdi->connect(in, dssdev);
- if (r)
- return r;
-
- return 0;
+ return in->ops.sdi->connect(in, dssdev);
}
static void acx565akm_disconnect(struct omap_dss_device *dssdev)
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
index bb85b21f0724..afac1d9445aa 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
@@ -337,16 +337,11 @@ static int tpo_td043_connect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- int r;
if (omapdss_device_is_connected(dssdev))
return 0;
- r = in->ops.dpi->connect(in, dssdev);
- if (r)
- return r;
-
- return 0;
+ return in->ops.dpi->connect(in, dssdev);
}
static void tpo_td043_disconnect(struct omap_dss_device *dssdev)
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
index 7ca1803bf161..726c190862d4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
@@ -875,15 +875,7 @@ void hdmi4_audio_stop(struct hdmi_core_data *core, struct hdmi_wp_data *wp)
int hdmi4_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
{
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
- if (!res) {
- DSSERR("can't get CORE mem resource\n");
- return -EINVAL;
- }
-
- core->base = devm_ioremap_resource(&pdev->dev, res);
+ core->base = devm_platform_ioremap_resource_byname(pdev, "core");
if (IS_ERR(core->base)) {
DSSERR("can't ioremap CORE\n");
return PTR_ERR(core->base);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
index 2f6ff14a48d9..eda29d3032e1 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c
@@ -887,15 +887,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
int hdmi5_core_init(struct platform_device *pdev, struct hdmi_core_data *core)
{
- struct resource *res;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
- if (!res) {
- DSSERR("can't get CORE IORESOURCE_MEM HDMI\n");
- return -EINVAL;
- }
-
- core->base = devm_ioremap_resource(&pdev->dev, res);
+ core->base = devm_platform_ioremap_resource_byname(pdev, "core");
if (IS_ERR(core->base)) {
DSSERR("can't ioremap HDMI core\n");
return PTR_ERR(core->base);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
index 9c645adba9e2..6fbfeb01b315 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
@@ -207,19 +207,11 @@ static const struct hdmi_phy_features *hdmi_phy_get_features(void)
int hdmi_phy_init(struct platform_device *pdev, struct hdmi_phy_data *phy)
{
- struct resource *res;
-
phy_feat = hdmi_phy_get_features();
if (!phy_feat)
return -ENODEV;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy");
- if (!res) {
- DSSERR("can't get PHY mem resource\n");
- return -EINVAL;
- }
-
- phy->base = devm_ioremap_resource(&pdev->dev, res);
+ phy->base = devm_platform_ioremap_resource_byname(pdev, "phy");
if (IS_ERR(phy->base)) {
DSSERR("can't ioremap TX PHY\n");
return PTR_ERR(phy->base);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
index 4991be031b0b..c5f89129dcdd 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
@@ -100,15 +100,10 @@ static int hdmi_pll_enable(struct dss_pll *dsspll)
{
struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
struct hdmi_wp_data *wp = pll->wp;
- u16 r = 0;
dss_ctrl_pll_enable(DSS_PLL_HDMI, true);
- r = hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
- if (r)
- return r;
-
- return 0;
+ return hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_BOTHON_ALLCLKS);
}
static void hdmi_pll_disable(struct dss_pll *dsspll)
@@ -220,17 +215,10 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
struct hdmi_wp_data *wp)
{
int r;
- struct resource *res;
pll->wp = wp;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
- if (!res) {
- DSSERR("can't get PLL mem resource\n");
- return -EINVAL;
- }
-
- pll->base = devm_ioremap_resource(&pdev->dev, res);
+ pll->base = devm_platform_ioremap_resource_byname(pdev, "pll");
if (IS_ERR(pll->base)) {
DSSERR("can't ioremap PLLCTRL\n");
return PTR_ERR(pll->base);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
index f560fa4d7786..905d642ff9ed 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
@@ -890,8 +890,7 @@ static int venc_remove(struct platform_device *pdev)
static int venc_runtime_suspend(struct device *dev)
{
- if (venc.tv_dac_clk)
- clk_disable_unprepare(venc.tv_dac_clk);
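+	/* clk_disable_unprepare() is a no-op for a NULL clock, so no check. */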
+ clk_disable_unprepare(venc.tv_dac_clk);
dispc_runtime_put();
@@ -906,8 +905,7 @@ static int venc_runtime_resume(struct device *dev)
if (r < 0)
return r;
- if (venc.tv_dac_clk)
- clk_prepare_enable(venc.tv_dac_clk);
+ clk_prepare_enable(venc.tv_dac_clk);
return 0;
}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
index f45fe60b9e7d..ca430ca69ba3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
@@ -129,7 +129,6 @@ struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
const char * const clkctrl_name[] = { "pll1_clkctrl", "pll2_clkctrl" };
const char * const clkin_name[] = { "video1_clk", "video2_clk" };
- struct resource *res;
struct dss_video_pll *vpll;
void __iomem *pll_base, *clkctrl_base;
struct clk *clk;
@@ -138,14 +137,7 @@ struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
/* PLL CONTROL */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, reg_name[id]);
- if (!res) {
- dev_err(&pdev->dev,
- "missing platform resource data for pll%d\n", id);
- return ERR_PTR(-ENODEV);
- }
-
- pll_base = devm_ioremap_resource(&pdev->dev, res);
+ pll_base = devm_platform_ioremap_resource_byname(pdev, reg_name[id]);
if (IS_ERR(pll_base)) {
dev_err(&pdev->dev, "failed to ioremap pll%d reg_name\n", id);
return ERR_CAST(pll_base);
@@ -153,15 +145,7 @@ struct dss_pll *dss_video_pll_init(struct platform_device *pdev, int id,
/* CLOCK CONTROL */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
- clkctrl_name[id]);
- if (!res) {
- dev_err(&pdev->dev,
- "missing platform resource data for pll%d\n", id);
- return ERR_PTR(-ENODEV);
- }
-
- clkctrl_base = devm_ioremap_resource(&pdev->dev, res);
+ clkctrl_base = devm_platform_ioremap_resource_byname(pdev, clkctrl_name[id]);
if (IS_ERR(clkctrl_base)) {
dev_err(&pdev->dev, "failed to ioremap pll%d clkctrl\n", id);
return ERR_CAST(clkctrl_base);
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index 01a7110e61a7..7f79db827b07 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -192,54 +192,6 @@ int sbusfb_ioctl_helper(unsigned long cmd, unsigned long arg,
EXPORT_SYMBOL(sbusfb_ioctl_helper);
#ifdef CONFIG_COMPAT
-static int fbiogetputcmap(struct fb_info *info, unsigned int cmd, unsigned long arg)
-{
- struct fbcmap32 __user *argp = (void __user *)arg;
- struct fbcmap __user *p = compat_alloc_user_space(sizeof(*p));
- u32 addr;
- int ret;
-
- ret = copy_in_user(p, argp, 2 * sizeof(int));
- ret |= get_user(addr, &argp->red);
- ret |= put_user(compat_ptr(addr), &p->red);
- ret |= get_user(addr, &argp->green);
- ret |= put_user(compat_ptr(addr), &p->green);
- ret |= get_user(addr, &argp->blue);
- ret |= put_user(compat_ptr(addr), &p->blue);
- if (ret)
- return -EFAULT;
- return info->fbops->fb_ioctl(info,
- (cmd == FBIOPUTCMAP32) ?
- FBIOPUTCMAP_SPARC : FBIOGETCMAP_SPARC,
- (unsigned long)p);
-}
-
-static int fbiogscursor(struct fb_info *info, unsigned long arg)
-{
- struct fbcursor __user *p = compat_alloc_user_space(sizeof(*p));
- struct fbcursor32 __user *argp = (void __user *)arg;
- compat_uptr_t addr;
- int ret;
-
- ret = copy_in_user(p, argp,
- 2 * sizeof (short) + 2 * sizeof(struct fbcurpos));
- ret |= copy_in_user(&p->size, &argp->size, sizeof(struct fbcurpos));
- ret |= copy_in_user(&p->cmap, &argp->cmap, 2 * sizeof(int));
- ret |= get_user(addr, &argp->cmap.red);
- ret |= put_user(compat_ptr(addr), &p->cmap.red);
- ret |= get_user(addr, &argp->cmap.green);
- ret |= put_user(compat_ptr(addr), &p->cmap.green);
- ret |= get_user(addr, &argp->cmap.blue);
- ret |= put_user(compat_ptr(addr), &p->cmap.blue);
- ret |= get_user(addr, &argp->mask);
- ret |= put_user(compat_ptr(addr), &p->mask);
- ret |= get_user(addr, &argp->image);
- ret |= put_user(compat_ptr(addr), &p->image);
- if (ret)
- return -EFAULT;
- return info->fbops->fb_ioctl(info, FBIOSCURSOR, (unsigned long)p);
-}
-
int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
@@ -248,6 +200,7 @@ int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar
case FBIOGATTR:
case FBIOSVIDEO:
case FBIOGVIDEO:
+ case FBIOSCURSOR32:
case FBIOGCURSOR32: /* This is not implemented yet.
Later it should be converted... */
case FBIOSCURPOS:
@@ -255,11 +208,76 @@ int sbusfb_compat_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar
case FBIOGCURMAX:
return info->fbops->fb_ioctl(info, cmd, arg);
case FBIOPUTCMAP32:
- return fbiogetputcmap(info, cmd, arg);
- case FBIOGETCMAP32:
- return fbiogetputcmap(info, cmd, arg);
- case FBIOSCURSOR32:
- return fbiogscursor(info, arg);
+ case FBIOPUTCMAP_SPARC: {
+ struct fbcmap32 c;
+ struct fb_cmap cmap;
+ u16 red, green, blue;
+ u8 red8, green8, blue8;
+ unsigned char __user *ured;
+ unsigned char __user *ugreen;
+ unsigned char __user *ublue;
+ unsigned int i;
+
+ if (copy_from_user(&c, compat_ptr(arg), sizeof(c)))
+ return -EFAULT;
+ ured = compat_ptr(c.red);
+ ugreen = compat_ptr(c.green);
+ ublue = compat_ptr(c.blue);
+
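+		/*
+		 * fb_set_cmap() takes 16-bit colour components, but the
+		 * 32-bit interface carries 8-bit ones: widen each component
+		 * and set the entries one at a time.
+		 */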
+ cmap.len = 1;
+ cmap.red = &red;
+ cmap.green = &green;
+ cmap.blue = &blue;
+ cmap.transp = NULL;
+ for (i = 0; i < c.count; i++) {
+ int err;
+
+ if (get_user(red8, &ured[i]) ||
+ get_user(green8, &ugreen[i]) ||
+ get_user(blue8, &ublue[i]))
+ return -EFAULT;
+
+ red = red8 << 8;
+ green = green8 << 8;
+ blue = blue8 << 8;
+
+ cmap.start = c.index + i;
+ err = fb_set_cmap(&cmap, info);
+ if (err)
+ return err;
+ }
+ return 0;
+ }
+ case FBIOGETCMAP32: {
+ struct fbcmap32 c;
+ unsigned char __user *ured;
+ unsigned char __user *ugreen;
+ unsigned char __user *ublue;
+ struct fb_cmap *cmap = &info->cmap;
+ unsigned int index, i;
+ u8 red, green, blue;
+
+ if (copy_from_user(&c, compat_ptr(arg), sizeof(c)))
+ return -EFAULT;
+ index = c.index;
+ ured = compat_ptr(c.red);
+ ugreen = compat_ptr(c.green);
+ ublue = compat_ptr(c.blue);
+
+ if (index > cmap->len || c.count > cmap->len - index)
+ return -EINVAL;
+
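+		/* Copy out, narrowing the 16-bit components back to 8 bits. */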
+ for (i = 0; i < c.count; i++) {
+ red = cmap->red[index + i] >> 8;
+ green = cmap->green[index + i] >> 8;
+ blue = cmap->blue[index + i] >> 8;
+ if (put_user(red, &ured[i]) ||
+ put_user(green, &ugreen[i]) ||
+ put_user(blue, &ublue[i]))
+ return -EFAULT;
+ }
+ return 0;
+ }
default:
return -ENOIOCTLCMD;
}
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index c1043420dbd3..c0952cc96bdb 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -341,8 +341,7 @@ static void lcdc_wait_bit(struct sh_mobile_lcdc_priv *priv,
static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv)
{
if (atomic_inc_and_test(&priv->hw_usecnt)) {
- if (priv->dot_clk)
- clk_prepare_enable(priv->dot_clk);
+ clk_prepare_enable(priv->dot_clk);
pm_runtime_get_sync(priv->dev);
}
}
@@ -351,8 +350,7 @@ static void sh_mobile_lcdc_clk_off(struct sh_mobile_lcdc_priv *priv)
{
if (atomic_sub_return(1, &priv->hw_usecnt) == -1) {
pm_runtime_put(priv->dev);
- if (priv->dot_clk)
- clk_disable_unprepare(priv->dot_clk);
+ clk_disable_unprepare(priv->dot_clk);
}
}
diff --git a/drivers/video/fbdev/sis/300vtbl.h b/drivers/video/fbdev/sis/300vtbl.h
index e4b4a2626da4..26b19f721ae4 100644
--- a/drivers/video/fbdev/sis/300vtbl.h
+++ b/drivers/video/fbdev/sis/300vtbl.h
@@ -1061,8 +1061,6 @@ static const unsigned char SiS300_CHTVVCLKUNTSC[] = { 0x29,0x29,0x29,0x29,0x2a,
static const unsigned char SiS300_CHTVVCLKONTSC[] = { 0x2c,0x2c,0x2c,0x2c,0x2d,0x2b };
-static const unsigned char SiS300_CHTVVCLKSONTSC[] = { 0x2c,0x2c,0x2c,0x2c,0x2d,0x2b };
-
static const unsigned char SiS300_CHTVVCLKUPAL[] = { 0x2f,0x2f,0x2f,0x2f,0x2f,0x31 };
static const unsigned char SiS300_CHTVVCLKOPAL[] = { 0x2f,0x2f,0x2f,0x2f,0x30,0x32 };
diff --git a/drivers/video/fbdev/sis/sis_accel.h b/drivers/video/fbdev/sis/sis_accel.h
index c3dfd2a20cf9..98d209658662 100644
--- a/drivers/video/fbdev/sis/sis_accel.h
+++ b/drivers/video/fbdev/sis/sis_accel.h
@@ -140,9 +140,9 @@
#define SiS300Idle \
{ \
- while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){}; \
- while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){}; \
- while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){}; \
+ while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){} \
+ while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){} \
+ while((MMIO_IN16(ivideo->mmio_vbase, BR(16)+2) & 0xE000) != 0xE000){} \
CmdQueLen = MMIO_IN16(ivideo->mmio_vbase, 0x8240); \
}
/* (do three times, because 2D engine seems quite unsure about whether or not it's idle) */
@@ -270,10 +270,10 @@
#define SiS310Idle \
{ \
- while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){}; \
- while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){}; \
- while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){}; \
- while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){}; \
+ while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){} \
+ while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){} \
+ while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){} \
+ while( (MMIO_IN16(ivideo->mmio_vbase, Q_STATUS+2) & 0x8000) != 0x8000){} \
CmdQueLen = 0; \
}
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index a53243abd945..1f0ee7f3f473 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -106,8 +106,9 @@ struct drm_device;
#define DP_AUX_I2C_REPLY_DEFER (0x2 << 2)
#define DP_AUX_I2C_REPLY_MASK (0x3 << 2)
-/* AUX CH addresses */
-/* DPCD */
+/* DPCD Field Address Mapping */
+
+/* Receiver Capability */
#define DP_DPCD_REV 0x000
# define DP_DPCD_REV_10 0x10
# define DP_DPCD_REV_11 0x11
@@ -124,6 +125,7 @@ struct drm_device;
#define DP_MAX_DOWNSPREAD 0x003
# define DP_MAX_DOWNSPREAD_0_5 (1 << 0)
+# define DP_STREAM_REGENERATION_STATUS_CAP (1 << 1) /* 2.0 */
# define DP_NO_AUX_HANDSHAKE_LINK_TRAINING (1 << 6)
# define DP_TPS4_SUPPORTED (1 << 7)
@@ -141,6 +143,7 @@ struct drm_device;
#define DP_MAIN_LINK_CHANNEL_CODING 0x006
# define DP_CAP_ANSI_8B10B (1 << 0)
+# define DP_CAP_ANSI_128B132B (1 << 1) /* 2.0 */
#define DP_DOWN_STREAM_PORT_COUNT 0x007
# define DP_PORT_COUNT_MASK 0x0f
@@ -184,8 +187,14 @@ struct drm_device;
#define DP_FAUX_CAP 0x020 /* 1.2 */
# define DP_FAUX_CAP_1 (1 << 0)
+#define DP_SINK_VIDEO_FALLBACK_FORMATS 0x020 /* 2.0 */
+# define DP_FALLBACK_1024x768_60HZ_24BPP (1 << 0)
+# define DP_FALLBACK_1280x720_60HZ_24BPP (1 << 1)
+# define DP_FALLBACK_1920x1080_60HZ_24BPP (1 << 2)
+
#define DP_MSTM_CAP 0x021 /* 1.2 */
# define DP_MST_CAP (1 << 0)
+# define DP_SINGLE_STREAM_SIDEBAND_MSG (1 << 1) /* 2.0 */
#define DP_NUMBER_OF_AUDIO_ENDPOINTS 0x022 /* 1.2 */
@@ -426,13 +435,16 @@ struct drm_device;
#define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1
#define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2
-/* link configuration */
+/* Link Configuration */
#define DP_LINK_BW_SET 0x100
# define DP_LINK_RATE_TABLE 0x00 /* eDP 1.4 */
# define DP_LINK_BW_1_62 0x06
# define DP_LINK_BW_2_7 0x0a
# define DP_LINK_BW_5_4 0x14 /* 1.2 */
# define DP_LINK_BW_8_1 0x1e /* 1.4 */
+# define DP_LINK_BW_10 0x01 /* 2.0 128b/132b Link Layer */
+# define DP_LINK_BW_13_5 0x04 /* 2.0 128b/132b Link Layer */
+# define DP_LINK_BW_20 0x02 /* 2.0 128b/132b Link Layer */
#define DP_LANE_COUNT_SET 0x101
# define DP_LANE_COUNT_MASK 0x0f
@@ -484,12 +496,15 @@ struct drm_device;
# define DP_TRAIN_PRE_EMPHASIS_SHIFT 3
# define DP_TRAIN_MAX_PRE_EMPHASIS_REACHED (1 << 5)
+# define DP_TX_FFE_PRESET_VALUE_MASK (0xf << 0) /* 2.0 128b/132b Link Layer */
+
#define DP_DOWNSPREAD_CTRL 0x107
# define DP_SPREAD_AMP_0_5 (1 << 4)
# define DP_MSA_TIMING_PAR_IGNORE_EN (1 << 7) /* eDP */
#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
# define DP_SET_ANSI_8B10B (1 << 0)
+# define DP_SET_ANSI_128B132B (1 << 1)
#define DP_I2C_SPEED_CONTROL_STATUS 0x109 /* DPI */
/* bitmask as for DP_I2C_SPEED_CAP */
@@ -508,8 +523,19 @@ struct drm_device;
# define DP_LINK_QUAL_PATTERN_ERROR_RATE 2
# define DP_LINK_QUAL_PATTERN_PRBS7 3
# define DP_LINK_QUAL_PATTERN_80BIT_CUSTOM 4
-# define DP_LINK_QUAL_PATTERN_HBR2_EYE 5
-# define DP_LINK_QUAL_PATTERN_MASK 7
+# define DP_LINK_QUAL_PATTERN_CP2520_PAT_1 5
+# define DP_LINK_QUAL_PATTERN_CP2520_PAT_2 6
+# define DP_LINK_QUAL_PATTERN_CP2520_PAT_3 7
+/* DP 2.0 UHBR10, UHBR13.5, UHBR20 */
+# define DP_LINK_QUAL_PATTERN_128B132B_TPS1 0x08
+# define DP_LINK_QUAL_PATTERN_128B132B_TPS2 0x10
+# define DP_LINK_QUAL_PATTERN_PRSBS9 0x18
+# define DP_LINK_QUAL_PATTERN_PRSBS11 0x20
+# define DP_LINK_QUAL_PATTERN_PRSBS15 0x28
+# define DP_LINK_QUAL_PATTERN_PRSBS23 0x30
+# define DP_LINK_QUAL_PATTERN_PRSBS31 0x38
+# define DP_LINK_QUAL_PATTERN_CUSTOM 0x40
+# define DP_LINK_QUAL_PATTERN_SQUARE 0x48
#define DP_TRAINING_LANE0_1_SET2 0x10f
#define DP_TRAINING_LANE2_3_SET2 0x110
@@ -580,6 +606,7 @@ struct drm_device;
#define DP_PAYLOAD_ALLOCATE_START_TIME_SLOT 0x1c1
#define DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT 0x1c2
+/* Link/Sink Device Status */
#define DP_SINK_COUNT 0x200
/* prior to 1.2 bit 7 was reserved mbz */
# define DP_GET_SINK_COUNT(x) ((((x) & 0x80) >> 1) | ((x) & 0x3f))
@@ -611,9 +638,9 @@ struct drm_device;
#define DP_LINK_STATUS_UPDATED (1 << 7)
#define DP_SINK_STATUS 0x205
-
-#define DP_RECEIVE_PORT_0_STATUS (1 << 0)
-#define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+# define DP_RECEIVE_PORT_0_STATUS (1 << 0)
+# define DP_RECEIVE_PORT_1_STATUS (1 << 1)
+# define DP_STREAM_REGENERATION_STATUS (1 << 2) /* 2.0 */
#define DP_ADJUST_REQUEST_LANE0_1 0x206
#define DP_ADJUST_REQUEST_LANE2_3 0x207
@@ -626,6 +653,12 @@ struct drm_device;
# define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0
# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
+/* DP 2.0 128b/132b Link Layer */
+# define DP_ADJUST_TX_FFE_PRESET_LANE0_MASK (0xf << 0)
+# define DP_ADJUST_TX_FFE_PRESET_LANE0_SHIFT 0
+# define DP_ADJUST_TX_FFE_PRESET_LANE1_MASK (0xf << 4)
+# define DP_ADJUST_TX_FFE_PRESET_LANE1_SHIFT 4
+
#define DP_ADJUST_REQUEST_POST_CURSOR2 0x20c
# define DP_ADJUST_POST_CURSOR2_LANE0_MASK 0x03
# define DP_ADJUST_POST_CURSOR2_LANE0_SHIFT 0
@@ -779,20 +812,27 @@ struct drm_device;
#define DP_VC_PAYLOAD_ID_SLOT_1 0x2c1 /* 1.2 MST */
/* up to ID_SLOT_63 at 0x2ff */
+/* Source Device-specific */
#define DP_SOURCE_OUI 0x300
+
+/* Sink Device-specific */
#define DP_SINK_OUI 0x400
+
+/* Branch Device-specific */
#define DP_BRANCH_OUI 0x500
#define DP_BRANCH_ID 0x503
#define DP_BRANCH_REVISION_START 0x509
#define DP_BRANCH_HW_REV 0x509
#define DP_BRANCH_SW_REV 0x50A
+/* Link/Sink Device Power Control */
#define DP_SET_POWER 0x600
# define DP_SET_POWER_D0 0x1
# define DP_SET_POWER_D3 0x2
# define DP_SET_POWER_MASK 0x3
# define DP_SET_POWER_D3_AUX_ON 0x5
+/* eDP-specific */
#define DP_EDP_DPCD_REV 0x700 /* eDP 1.2 */
# define DP_EDP_11 0x00
# define DP_EDP_12 0x01
@@ -876,11 +916,13 @@ struct drm_device;
#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */
#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */
+/* Sideband MSG Buffers */
#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
#define DP_SIDEBAND_MSG_DOWN_REP_BASE 0x1400 /* 1.2 MST */
#define DP_SIDEBAND_MSG_UP_REQ_BASE 0x1600 /* 1.2 MST */
+/* DPRX Event Status Indicator */
#define DP_SINK_COUNT_ESI 0x2002 /* 1.2 */
/* 0-5 sink count */
# define DP_SINK_COUNT_CP_READY (1 << 6)
@@ -934,8 +976,8 @@ struct drm_device;
#define DP_LANE_ALIGN_STATUS_UPDATED_ESI 0x200e /* status same as 0x204 */
#define DP_SINK_STATUS_ESI 0x200f /* status same as 0x205 */
+/* Extended Receiver Capability: See DP_DPCD_REV for definitions */
#define DP_DP13_DPCD_REV 0x2200
-#define DP_DP13_MAX_LINK_RATE 0x2201
#define DP_DPRX_FEATURE_ENUMERATION_LIST 0x2210 /* DP 1.3 */
# define DP_GTC_CAP (1 << 0) /* DP 1.3 */
@@ -947,6 +989,15 @@ struct drm_device;
# define DP_VSC_EXT_CEA_SDP_SUPPORTED (1 << 6) /* DP 1.4 */
# define DP_VSC_EXT_CEA_SDP_CHAINING_SUPPORTED (1 << 7) /* DP 1.4 */
+#define DP_128B132B_SUPPORTED_LINK_RATES 0x2215 /* 2.0 */
+# define DP_UHBR10 (1 << 0)
+# define DP_UHBR20 (1 << 1)
+# define DP_UHBR13_5 (1 << 2)
+
+#define DP_128B132B_TRAINING_AUX_RD_INTERVAL 0x2216 /* 2.0 */
+# define DP_128B132B_TRAINING_AUX_RD_INTERVAL_MASK 0x7f
+
+/* Protocol Converter Extension */
/* HDMI CEC tunneling over AUX DP 1.3 section 5.3.3.3.1 DPCD 1.4+ */
#define DP_CEC_TUNNELING_CAPABILITY 0x3000
# define DP_CEC_TUNNELING_CAPABLE (1 << 0)
@@ -1013,6 +1064,7 @@ struct drm_device;
#define DP_PROTOCOL_CONVERTER_CONTROL_2 0x3052 /* DP 1.3 */
# define DP_CONVERSION_TO_YCBCR422_ENABLE (1 << 0) /* DP 1.3 */
+/* HDCP 1.3 and HDCP 2.2 */
#define DP_AUX_HDCP_BKSV 0x68000
#define DP_AUX_HDCP_RI_PRIME 0x68005
#define DP_AUX_HDCP_AKSV 0x68007
@@ -1058,7 +1110,7 @@ struct drm_device;
#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494
#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518
-/* Link Training (LT)-tunable PHY Repeaters */
+/* LTTPR: Link Training (LT)-tunable PHY Repeaters */
#define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */
#define DP_MAX_LINK_RATE_PHY_REPEATER 0xf0001 /* 1.4a */
#define DP_PHY_REPEATER_CNT 0xf0002 /* 1.3 */
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index e57d0440f00f..023076255a7f 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -36,10 +36,12 @@ struct drm_file;
struct drm_gem_object;
struct drm_master;
struct drm_minor;
+struct dma_buf;
struct dma_buf_attachment;
struct drm_display_mode;
struct drm_mode_create_dumb;
struct drm_printer;
+struct sg_table;
/**
* enum drm_driver_feature - feature flags
@@ -327,32 +329,6 @@ struct drm_driver {
void (*debugfs_init)(struct drm_minor *minor);
/**
- * @gem_free_object_unlocked: deconstructor for drm_gem_objects
- *
- * This is deprecated and should not be used by new drivers. Use
- * &drm_gem_object_funcs.free instead.
- */
- void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
-
- /**
- * @gem_open_object:
- *
- * This callback is deprecated in favour of &drm_gem_object_funcs.open.
- *
- * Driver hook called upon gem handle creation
- */
- int (*gem_open_object) (struct drm_gem_object *, struct drm_file *);
-
- /**
- * @gem_close_object:
- *
- * This callback is deprecated in favour of &drm_gem_object_funcs.close.
- *
- * Driver hook called upon gem handle release
- */
- void (*gem_close_object) (struct drm_gem_object *, struct drm_file *);
-
- /**
* @gem_create_object: constructor for gem objects
*
* Hook for allocating the GEM object struct, for use by the CMA and
@@ -360,6 +336,7 @@ struct drm_driver {
*/
struct drm_gem_object *(*gem_create_object)(struct drm_device *dev,
size_t size);
+
/**
* @prime_handle_to_fd:
*
@@ -382,14 +359,7 @@ struct drm_driver {
*/
int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
int prime_fd, uint32_t *handle);
- /**
- * @gem_prime_export:
- *
- * Export hook for GEM drivers. Deprecated in favour of
- * &drm_gem_object_funcs.export.
- */
- struct dma_buf * (*gem_prime_export)(struct drm_gem_object *obj,
- int flags);
+
/**
* @gem_prime_import:
*
@@ -399,29 +369,6 @@ struct drm_driver {
*/
struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
struct dma_buf *dma_buf);
-
- /**
- * @gem_prime_pin:
- *
- * Deprecated hook in favour of &drm_gem_object_funcs.pin.
- */
- int (*gem_prime_pin)(struct drm_gem_object *obj);
-
- /**
- * @gem_prime_unpin:
- *
- * Deprecated hook in favour of &drm_gem_object_funcs.unpin.
- */
- void (*gem_prime_unpin)(struct drm_gem_object *obj);
-
-
- /**
- * @gem_prime_get_sg_table:
- *
- * Deprecated hook in favour of &drm_gem_object_funcs.get_sg_table.
- */
- struct sg_table *(*gem_prime_get_sg_table)(struct drm_gem_object *obj);
-
/**
* @gem_prime_import_sg_table:
*
@@ -433,22 +380,6 @@ struct drm_driver {
struct dma_buf_attachment *attach,
struct sg_table *sgt);
/**
- * @gem_prime_vmap:
- *
- * Deprecated vmap hook for GEM drivers. Please use
- * &drm_gem_object_funcs.vmap instead.
- */
- void *(*gem_prime_vmap)(struct drm_gem_object *obj);
-
- /**
- * @gem_prime_vunmap:
- *
- * Deprecated vunmap hook for GEM drivers. Please use
- * &drm_gem_object_funcs.vunmap instead.
- */
- void (*gem_prime_vunmap)(struct drm_gem_object *obj, void *vaddr);
-
- /**
* @gem_prime_mmap:
*
* mmap hook for GEM drivers, used to implement dma-buf mmap in the
@@ -522,14 +453,6 @@ struct drm_driver {
struct drm_device *dev,
uint32_t handle);
- /**
- * @gem_vm_ops: Driver private ops for this object
- *
- * For GEM drivers this is deprecated in favour of
- * &drm_gem_object_funcs.vm_ops.
- */
- const struct vm_operations_struct *gem_vm_ops;
-
/** @major: driver major number */
int major;
/** @minor: driver minor number */
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 337a48321705..c38dd35da00b 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -272,7 +272,7 @@ struct drm_gem_object {
* attachment point for the device. This is invariant over the lifetime
* of a gem object.
*
- * The &drm_driver.gem_free_object_unlocked callback is responsible for
+ * The &drm_gem_object_funcs.free callback is responsible for
* cleaning up the dma_buf attachment and references acquired at import
* time.
*
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 62cc6e6c3a4f..128f88174d32 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -35,7 +35,6 @@ struct vm_area_struct;
* @placement: TTM placement information. Supported placements are \
%TTM_PL_VRAM and %TTM_PL_SYSTEM
* @placements: TTM placement information.
- * @pin_count: Pin counter
*
* The type struct drm_gem_vram_object represents a GEM object that is
* backed by VRAM. It can be used for simple framebuffer devices with
@@ -64,8 +63,6 @@ struct drm_gem_vram_object {
/* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */
struct ttm_placement placement;
struct ttm_place placements[2];
-
- int pin_count;
};
/**
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index a18f73eb3cf6..5ffbb4ed5b35 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -58,6 +58,12 @@ struct drm_mode_config_funcs {
* actual modifier used if the request doesn't have it specified,
* ie. when (@mode_cmd->flags & DRM_MODE_FB_MODIFIERS) == 0.
*
+ * IMPORTANT: These implied modifiers for legacy userspace must be
+ * stored in struct &drm_framebuffer, including all relevant metadata
+ * like &drm_framebuffer.pitches and &drm_framebuffer.offsets if the
+ * modifier enables additional planes beyond the fourcc pixel format
+ * code. This is required by the GETFB2 ioctl.
+ *
* If the parameters are deemed valid and the backing storage objects in
* the underlying memory manager all exist, then the driver allocates
* a new &drm_framebuffer structure, subclassed to contain
@@ -915,6 +921,13 @@ struct drm_mode_config {
* @allow_fb_modifiers:
*
* Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
+ *
+ * IMPORTANT:
+ *
+ * If this is set the driver must fill out the full implicit modifier
+ * information in their &drm_mode_config_funcs.fb_create hook for legacy
+ * userspace which does not set modifiers. Otherwise the GETFB2 ioctl is
+ * broken for modifier aware userspace.
*/
bool allow_fb_modifiers;
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 4efec30f8bad..bde42988c4b5 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -417,14 +417,10 @@ struct drm_crtc_helper_funcs {
* @atomic_enable must be the inverse of @atomic_disable for atomic
* drivers.
*
- * Drivers can use the @old_crtc_state input parameter if the operations
- * needed to enable the CRTC don't depend solely on the new state but
- * also on the transition between the old state and the new state.
- *
* This function is optional.
*/
void (*atomic_enable)(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
/**
* @atomic_disable:
@@ -441,15 +437,10 @@ struct drm_crtc_helper_funcs {
* need to implement it if there's no need to disable anything at the
* CRTC level.
*
- * Comparing to @disable, this one provides the additional input
- * parameter @old_crtc_state which could be used to access the old
- * state. Atomic drivers should consider to use this one instead
- * of @disable.
- *
* This function is optional.
*/
void (*atomic_disable)(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state);
+ struct drm_atomic_state *state);
/**
* @get_scanout_position:
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 0f69f9fbf12c..0991a47a1567 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -54,6 +54,7 @@ struct device;
struct dma_buf_export_info;
struct dma_buf;
struct dma_buf_attachment;
+struct dma_buf_map;
enum dma_data_direction;
@@ -82,8 +83,8 @@ struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir);
-void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
-void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
+int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map);
+void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct dma_buf_map *map);
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma);
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 0f7cd21d6d74..37102e45e496 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -90,9 +90,6 @@ struct ttm_tt;
* @kref: Reference count of this buffer object. When this refcount reaches
* zero, the object is destroyed or put on the delayed delete list.
* @mem: structure describing current placement.
- * @persistent_swap_storage: Usually the swap storage is deleted for buffers
- * pinned in physical memory. If this behaviour is not desired, this member
- * holds a pointer to a persistent shmem object.
* @ttm: TTM structure holding system pages.
* @evicted: Whether the object was evicted without user-space knowing.
* @deleted: True if the object is only a zombie and already deleted.
@@ -139,7 +136,6 @@ struct ttm_buffer_object {
*/
struct ttm_resource mem;
- struct file *persistent_swap_storage;
struct ttm_tt *ttm;
bool deleted;
@@ -157,6 +153,7 @@ struct ttm_buffer_object {
struct dma_fence *moving;
unsigned priority;
+ unsigned pin_count;
/**
* Special members that are protected by the reserve lock
@@ -261,6 +258,11 @@ ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
*/
int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait);
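+
+/**
+ * ttm_bo_wait_ctx - wait for the buffer object to become idle
+ * @bo: The buffer object.
+ * @ctx: Operation context; supplies the interruptible and no_wait_gpu flags.
+ *
+ * Convenience wrapper around ttm_bo_wait() that takes its parameters from
+ * a struct ttm_operation_ctx.
+ */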
+static inline int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx)
+{
+ return ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
+}
+
/**
* ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
*
@@ -447,50 +449,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
void (*destroy) (struct ttm_buffer_object *));
/**
- * ttm_bo_create
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @size: Requested size of buffer object.
- * @type: Requested type of buffer object.
- * @placement: Initial placement.
- * @page_alignment: Data alignment in pages.
- * @interruptible: If needing to sleep while waiting for GPU resources,
- * sleep interruptible.
- * @p_bo: On successful completion *p_bo points to the created object.
- *
- * This function allocates a ttm_buffer_object, and then calls ttm_bo_init
- * on that object. The destroy function is set to kfree().
- * Returns
- * -ENOMEM: Out of memory.
- * -EINVAL: Invalid placement flags.
- * -ERESTARTSYS: Interrupted by signal while waiting for resources.
- */
-int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size,
- enum ttm_bo_type type, struct ttm_placement *placement,
- uint32_t page_alignment, bool interruptible,
- struct ttm_buffer_object **p_bo);
-
-/**
- * ttm_bo_evict_mm
- *
- * @bdev: Pointer to a ttm_bo_device struct.
- * @mem_type: The memory type.
- *
- * Evicts all buffers on the lru list of the memory type.
- * This is normally part of a VT switch or an
- * out-of-memory-space-due-to-fragmentation handler.
- * The caller must make sure that there are no other processes
- * currently validating buffers, and can do that by taking the
- * struct ttm_bo_device::ttm_lock in write mode.
- *
- * Returns:
- * -EINVAL: Invalid or uninitialized memory type.
- * -ERESTARTSYS: The call was interrupted by a signal while waiting to
- * evict a buffer.
- */
-int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
-
-/**
* ttm_kmap_obj_virtual
*
* @map: A struct ttm_bo_kmap_obj returned from ttm_bo_kmap.
@@ -583,9 +541,7 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
const char __user *wbuf, char __user *rbuf,
size_t count, loff_t *f_pos, bool write);
-int ttm_bo_swapout(struct ttm_bo_global *glob,
- struct ttm_operation_ctx *ctx);
-void ttm_bo_swapout_all(void);
+int ttm_bo_swapout(struct ttm_operation_ctx *ctx);
/**
* ttm_bo_uses_embedded_gem_object - check if the given bo uses the
@@ -606,6 +562,31 @@ static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
return bo->base.dev != NULL;
}
+/**
+ * ttm_bo_pin - Pin the buffer object.
+ * @bo: The buffer object to pin
+ *
+ * Make sure the buffer is not evicted any more during memory pressure.
+ */
+static inline void ttm_bo_pin(struct ttm_buffer_object *bo)
+{
+ dma_resv_assert_held(bo->base.resv);
+ ++bo->pin_count;
+}
+
+/**
+ * ttm_bo_unpin - Unpin the buffer object.
+ * @bo: The buffer object to unpin
+ *
+ * Allows the buffer object to be evicted again during memory pressure.
+ */
+static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
+{
+ dma_resv_assert_held(bo->base.resv);
+ WARN_ON_ONCE(!bo->pin_count);
+ --bo->pin_count;
+}
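+
+/*
+ * Usage sketch: both helpers must be called with the reservation lock held:
+ *
+ *	dma_resv_lock(bo->base.resv, NULL);
+ *	ttm_bo_pin(bo);
+ *	dma_resv_unlock(bo->base.resv);
+ *
+ * with a matching ttm_bo_unpin() once the buffer may be evicted again.
+ */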
+
int ttm_mem_evict_first(struct ttm_bo_device *bdev,
struct ttm_resource_manager *man,
const struct ttm_place *place,
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 864afa8f6f18..29f6a1d1c853 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -91,31 +91,6 @@ struct ttm_bo_driver {
void (*ttm_tt_unpopulate)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
/**
- * ttm_tt_bind
- *
- * @bdev: Pointer to a ttm device
- * @ttm: Pointer to a struct ttm_tt.
- * @bo_mem: Pointer to a struct ttm_resource describing the
- * memory type and location for binding.
- *
- * Bind the backend pages into the aperture in the location
- * indicated by @bo_mem. This function should be able to handle
- * differences between aperture and system page sizes.
- */
- int (*ttm_tt_bind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem);
-
- /**
- * ttm_tt_unbind
- *
- * @bdev: Pointer to a ttm device
- * @ttm: Pointer to a struct ttm_tt.
- *
- * Unbind previously bound backend pages. This function should be
- * able to handle differences between aperture and system page sizes.
- */
- void (*ttm_tt_unbind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
-
- /**
* ttm_tt_destroy
*
* @bdev: Pointer to a ttm device
@@ -181,18 +156,9 @@ struct ttm_bo_driver {
struct file *filp);
/**
- * Hook to notify driver about a driver move so it
- * can do tiling things and book-keeping.
- *
- * @evict: whether this move is evicting the buffer from the graphics
- * address space
+	 * Hook to notify the driver about a resource deletion.
*/
- void (*move_notify)(struct ttm_buffer_object *bo,
- bool evict,
- struct ttm_resource *new_mem);
- /* notify the driver we are taking a fault on this BO
- * and have reserved it */
- int (*fault_reserve_notify)(struct ttm_buffer_object *bo);
+ void (*delete_mem_notify)(struct ttm_buffer_object *bo);
/**
* notify the driver that we're about to swap out this bo
@@ -453,15 +419,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
/**
- * ttm_bo_unmap_virtual
- *
- * @bo: tear down the virtual mappings for this BO
- *
- * The caller must take ttm_mem_io_lock before calling this function.
- */
-void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
-
-/**
* ttm_bo_reserve:
*
* @bo: A pointer to a struct ttm_buffer_object.
@@ -578,32 +535,10 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
/*
* ttm_bo_util.c
*/
-
int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
struct ttm_resource *mem);
void ttm_mem_io_free(struct ttm_bo_device *bdev,
struct ttm_resource *mem);
-/**
- * ttm_bo_move_ttm
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- * @interruptible: Sleep interruptible if waiting.
- * @no_wait_gpu: Return immediately if the GPU is busy.
- * @new_mem: struct ttm_resource indicating where to move.
- *
- * Optimized move function for a buffer object with both old and
- * new placement backed by a TTM. The function will, if successful,
- * free any old aperture space, and set (@new_mem)->mm_node to NULL,
- * and update the (@bo)->mem placement flags. If unsuccessful, the old
- * data remains untouched, and it's up to the caller to free the
- * memory space indicated by @new_mem.
- * Returns:
- * !0: Failure.
- */
-
-int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem);
/**
* ttm_bo_move_memcpy
@@ -628,15 +563,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem);
/**
- * ttm_bo_free_old_node
- *
- * @bo: A pointer to a struct ttm_buffer_object.
- *
- * Utility function to free an old placement after a successful move.
- */
-void ttm_bo_free_old_node(struct ttm_buffer_object *bo);
-
-/**
* ttm_bo_move_accel_cleanup.
*
* @bo: A pointer to a struct ttm_buffer_object.
@@ -669,13 +595,15 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo);
/**
* ttm_io_prot
*
- * @c_state: Caching state.
+ * @bo: ttm buffer object
+ * @res: ttm resource object
* @tmp: Page protection flag for a normal, cached mapping.
*
* Utility function that returns the pgprot_t that should be used for
* setting up a PTE with the caching model of the given resource.
*/
-pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
+pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res,
+ pgprot_t tmp);
/**
* ttm_bo_tt_bind
@@ -685,13 +613,6 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp);
int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem);
/**
- * ttm_bo_tt_bind
- *
- * Unbind the object tt from a memory resource.
- */
-void ttm_bo_tt_unbind(struct ttm_buffer_object *bo);
-
-/**
* ttm_bo_tt_destroy.
*/
void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h
new file mode 100644
index 000000000000..161624dcf6be
--- /dev/null
+++ b/include/drm/ttm/ttm_caching.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_CACHING_H_
+#define _TTM_CACHING_H_
+
+enum ttm_caching {
+ ttm_uncached,
+ ttm_write_combined,
+ ttm_cached
+};
+
+#endif
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index a6b6ef5f9bf4..8fa1e7df6213 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -61,13 +61,13 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm);
/**
* Populates and DMA maps pages to fulfil a ttm_dma_populate() request
*/
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt,
struct ttm_operation_ctx *ctx);
/**
* Unpopulates and DMA unmaps pages as part of a
* ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt);
/**
* Output the state of pools to debugfs file
@@ -90,9 +90,9 @@ void ttm_dma_page_alloc_fini(void);
*/
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+int ttm_dma_populate(struct ttm_tt *ttm_dma, struct device *dev,
struct ttm_operation_ctx *ctx);
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+void ttm_dma_unpopulate(struct ttm_tt *ttm_dma, struct device *dev);
#else
static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
@@ -107,13 +107,13 @@ static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
return 0;
}
-static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
+static inline int ttm_dma_populate(struct ttm_tt *ttm_dma,
struct device *dev,
struct ttm_operation_ctx *ctx)
{
return -ENOMEM;
}
-static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
+static inline void ttm_dma_unpopulate(struct ttm_tt *ttm_dma,
struct device *dev)
{
}
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index d4022655eae4..aa6ba4d0cf78 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -43,28 +43,13 @@
#define TTM_PL_PRIV 3
/*
- * Other flags that affects data placement.
- * TTM_PL_FLAG_CACHED indicates cache-coherent mappings
- * if available.
- * TTM_PL_FLAG_SHARED means that another application may
- * reference the buffer.
- * TTM_PL_FLAG_NO_EVICT means that the buffer may never
- * be evicted to make room for other buffers.
* TTM_PL_FLAG_TOPDOWN requests to be placed from the
* top of the memory area, instead of the bottom.
*/
-#define TTM_PL_FLAG_CACHED (1 << 16)
-#define TTM_PL_FLAG_UNCACHED (1 << 17)
-#define TTM_PL_FLAG_WC (1 << 18)
#define TTM_PL_FLAG_CONTIGUOUS (1 << 19)
-#define TTM_PL_FLAG_NO_EVICT (1 << 21)
#define TTM_PL_FLAG_TOPDOWN (1 << 22)
-#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \
- TTM_PL_FLAG_UNCACHED | \
- TTM_PL_FLAG_WC)
-
/**
* struct ttm_place
*
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 0e172d94a0c1..f48a70d39ac5 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -29,6 +29,7 @@
#include <linux/mutex.h>
#include <linux/dma-fence.h>
#include <drm/drm_print.h>
+#include <drm/ttm/ttm_caching.h>
#define TTM_MAX_BO_PRIORITY 4U
@@ -148,9 +149,10 @@ struct ttm_resource_manager {
* Structure indicating the bus placement of an object.
*/
struct ttm_bus_placement {
- void *addr;
- phys_addr_t offset;
- bool is_iomem;
+ void *addr;
+ phys_addr_t offset;
+ bool is_iomem;
+ enum ttm_caching caching;
};
/**
@@ -228,8 +230,8 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res);
void ttm_resource_manager_init(struct ttm_resource_manager *man,
unsigned long p_size);
-int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev,
- struct ttm_resource_manager *man);
+int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev,
+ struct ttm_resource_manager *man);
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
struct drm_printer *p);
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 75208c0a0cac..df9a80650feb 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -28,15 +28,14 @@
#define _TTM_TT_H_
#include <linux/types.h>
+#include <drm/ttm/ttm_caching.h>
struct ttm_tt;
struct ttm_resource;
struct ttm_buffer_object;
struct ttm_operation_ctx;
-#define TTM_PAGE_FLAG_WRITE (1 << 3)
#define TTM_PAGE_FLAG_SWAPPED (1 << 4)
-#define TTM_PAGE_FLAG_PERSISTENT_SWAP (1 << 5)
#define TTM_PAGE_FLAG_ZERO_ALLOC (1 << 6)
#define TTM_PAGE_FLAG_DMA32 (1 << 7)
#define TTM_PAGE_FLAG_SG (1 << 8)
@@ -44,22 +43,17 @@ struct ttm_operation_ctx;
#define TTM_PAGE_FLAG_PRIV_POPULATED (1 << 31)
-enum ttm_caching_state {
- tt_uncached,
- tt_wc,
- tt_cached
-};
-
/**
* struct ttm_tt
*
* @pages: Array of pages backing the data.
+ * @page_flags: see TTM_PAGE_FLAG_*
* @num_pages: Number of pages in the page array.
- * @bdev: Pointer to the current struct ttm_bo_device.
- * @be: Pointer to the ttm backend.
+ * @sg: for SG objects via dma-buf
+ * @dma_address: The DMA (bus) addresses of the pages
* @swap_storage: Pointer to shmem struct file for swap storage.
- * @caching_state: The current caching state of the pages.
- * @state: The current binding state of the pages.
+ * @pages_list: used by some page allocation backend
+ * @caching: The current caching state of the pages.
*
* This is a structure holding the pages, caching- and aperture binding
* status for a buffer object that isn't backed by fixed (VRAM / AGP)
@@ -68,10 +62,12 @@ enum ttm_caching_state {
struct ttm_tt {
struct page **pages;
uint32_t page_flags;
- unsigned long num_pages;
- struct sg_table *sg; /* for SG objects via dma-buf */
+ uint32_t num_pages;
+ struct sg_table *sg;
+ dma_addr_t *dma_address;
struct file *swap_storage;
- enum ttm_caching_state caching_state;
+ struct list_head pages_list;
+ enum ttm_caching caching;
};
static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
@@ -79,33 +75,6 @@ static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
}
-static inline void ttm_tt_set_unpopulated(struct ttm_tt *tt)
-{
- tt->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
-}
-
-static inline void ttm_tt_set_populated(struct ttm_tt *tt)
-{
- tt->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
-}
-
-/**
- * struct ttm_dma_tt
- *
- * @ttm: Base ttm_tt struct.
- * @dma_address: The DMA (bus) addresses of the pages
- * @pages_list: used by some page allocation backend
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-struct ttm_dma_tt {
- struct ttm_tt ttm;
- dma_addr_t *dma_address;
- struct list_head pages_list;
-};
-
/**
* ttm_tt_create
*
@@ -123,6 +92,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
* @ttm: The struct ttm_tt.
* @bo: The buffer object we create the ttm for.
* @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ * @caching: the desired caching state of the pages
*
* Create a struct ttm_tt to back data with system memory pages.
* No pages are actually allocated.
@@ -130,11 +100,11 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
* NULL: Out of memory.
*/
int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
- uint32_t page_flags);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
- uint32_t page_flags);
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
- uint32_t page_flags);
+ uint32_t page_flags, enum ttm_caching caching);
+int ttm_dma_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags, enum ttm_caching caching);
+int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
+ uint32_t page_flags, enum ttm_caching caching);
/**
* ttm_tt_fini
@@ -144,7 +114,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
* Free memory of ttm_tt structure
*/
void ttm_tt_fini(struct ttm_tt *ttm);
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
/**
* ttm_ttm_destroy:
@@ -170,22 +139,7 @@ void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
* Swap in a previously swapped-out ttm_tt.
*/
int ttm_tt_swapin(struct ttm_tt *ttm);
-
-/**
- * ttm_tt_set_placement_caching:
- *
- * @ttm A struct ttm_tt the backing pages of which will change caching policy.
- * @placement: Flag indicating the desired caching policy.
- *
- * This function will change caching policy of any default kernel mappings of
- * the pages backing @ttm. If changing from cached to uncached or
- * write-combined,
- * all CPU caches will first be flushed to make sure the data of the pages
- * hit RAM. This function may be very costly as it involves global TLB
- * and cache flushes and potential page splitting / combining.
- */
-int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement);
-int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct file *persistent_swap_storage);
+int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
/**
* ttm_tt_populate - allocate pages for a ttm
diff --git a/include/linux/dma-buf-map.h b/include/linux/dma-buf-map.h
new file mode 100644
index 000000000000..fd1aba545fdf
--- /dev/null
+++ b/include/linux/dma-buf-map.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Pointer to dma-buf-mapped memory, plus helpers.
+ */
+
+#ifndef __DMA_BUF_MAP_H__
+#define __DMA_BUF_MAP_H__
+
+#include <linux/io.h>
+
+/**
+ * DOC: overview
+ *
+ * Calling dma-buf's vmap operation returns a pointer to the buffer's memory.
+ * Depending on the location of the buffer, users may have to access it with
+ * I/O operations or memory load/store operations. For example, copying to
+ * system memory could be done with memcpy(), copying to I/O memory would be
+ * done with memcpy_toio().
+ *
+ * .. code-block:: c
+ *
+ * void *vaddr = ...; // pointer to system memory
+ * memcpy(vaddr, src, len);
+ *
+ * void *vaddr_iomem = ...; // pointer to I/O memory
+ * memcpy_toio(vaddr, _iomem, src, len);
+ *
+ * When using dma-buf's vmap operation, the returned pointer is encoded as
+ * :c:type:`struct dma_buf_map <dma_buf_map>`.
+ * :c:type:`struct dma_buf_map <dma_buf_map>` stores the buffer's address in
+ * system or I/O memory and a flag that signals the required method of
+ * accessing the buffer. Use the returned instance and the helper functions
+ * to access the buffer's memory in the correct way.
+ *
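+ * For illustration, a minimal open-coded dispatch on the flag could look
+ * like this, assuming dma_buf_vmap() returns zero on success and buf, src
+ * and len are placeholders (prefer the helpers described below instead):
+ *
+ * .. code-block:: c
+ *
+ *	struct dma_buf_map map;
+ *
+ *	if (!dma_buf_vmap(buf, &map)) {
+ *		if (map.is_iomem)
+ *			memcpy_toio(map.vaddr_iomem, src, len);
+ *		else
+ *			memcpy(map.vaddr, src, len);
+ *		dma_buf_vunmap(buf, &map);
+ *	}
+ *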
+ * Open-coding access to :c:type:`struct dma_buf_map <dma_buf_map>` is
+ * considered bad style. Rather than accessing its fields directly, use one
+ * of the provided helper functions, or implement your own. For example,
+ * instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be initialized
+ * statically with DMA_BUF_MAP_INIT_VADDR(), or at runtime with
+ * dma_buf_map_set_vaddr(). These helpers will set an address in system memory.
+ *
+ * .. code-block:: c
+ *
+ * struct dma_buf_map map = DMA_BUF_MAP_INIT_VADDR(0xdeadbeaf);
+ *
+ * dma_buf_map_set_vaddr(&map, 0xdeadbeaf);
+ *
+ * Test if a mapping is valid with either dma_buf_map_is_set() or
+ * dma_buf_map_is_null().
+ *
+ * .. code-block:: c
+ *
+ * if (dma_buf_map_is_set(&map) != dma_buf_map_is_null(&map))
+ * // always true
+ *
+ * Instances of :c:type:`struct dma_buf_map <dma_buf_map>` can be compared
+ * for equality with dma_buf_map_is_equal(). Mappings that point to different
+ * memory spaces, system or I/O, are never equal. That's even true if both
+ * spaces are located in the same address space, both mappings contain the
+ * same address value, or both mappings refer to NULL.
+ *
+ * .. code-block:: c
+ *
+ * struct dma_buf_map sys_map; // refers to system memory
+ * struct dma_buf_map io_map; // refers to I/O memory
+ *
+ * if (dma_buf_map_is_equal(&sys_map, &io_map))
+ * // always false
+ *
+ * Instances of struct dma_buf_map do not have to be cleaned up, but
+ * can be cleared to NULL with dma_buf_map_clear(). Cleared mappings
+ * always refer to system memory.
+ *
+ * The type :c:type:`struct dma_buf_map <dma_buf_map>` and its helpers are
+ * actually independent from the dma-buf infrastructure. When sharing buffers
+ * among devices, drivers have to know the location of the memory to access
+ * the buffers in a safe way. :c:type:`struct dma_buf_map <dma_buf_map>`
+ * solves this problem for dma-buf and its users. If other drivers or
+ * sub-systems require similar functionality, the type could be generalized
+ * and moved to a more prominent header file.
+ */
+
+/**
+ * struct dma_buf_map - Pointer to vmap'ed dma-buf memory.
+ * @vaddr_iomem: The buffer's address if in I/O memory
+ * @vaddr: The buffer's address if in system memory
+ * @is_iomem: True if the dma-buf memory is located in I/O
+ * memory, or false otherwise.
+ */
+struct dma_buf_map {
+ union {
+ void __iomem *vaddr_iomem;
+ void *vaddr;
+ };
+ bool is_iomem;
+};
+
+/**
+ * DMA_BUF_MAP_INIT_VADDR - Initializes struct dma_buf_map to an address in system memory
+ * @vaddr: A system-memory address
+ */
+#define DMA_BUF_MAP_INIT_VADDR(vaddr_) \
+ { \
+ .vaddr = (vaddr_), \
+ .is_iomem = false, \
+ }
+
+/**
+ * dma_buf_map_set_vaddr - Sets a dma-buf mapping structure to an address in system memory
+ * @map: The dma-buf mapping structure
+ * @vaddr: A system-memory address
+ *
+ * Sets the address and clears the I/O-memory flag.
+ */
+static inline void dma_buf_map_set_vaddr(struct dma_buf_map *map, void *vaddr)
+{
+ map->vaddr = vaddr;
+ map->is_iomem = false;
+}
+
+/**
+ * dma_buf_map_is_equal - Compares two dma-buf mapping structures for equality
+ * @lhs: The dma-buf mapping structure
+ * @rhs: A dma-buf mapping structure to compare with
+ *
+ * Two dma-buf mapping structures are equal if they both refer to the same type of memory
+ * and to the same address within that memory.
+ *
+ * Returns:
+ * True if both structures are equal, or false otherwise.
+ */
+static inline bool dma_buf_map_is_equal(const struct dma_buf_map *lhs,
+ const struct dma_buf_map *rhs)
+{
+ if (lhs->is_iomem != rhs->is_iomem)
+ return false;
+ else if (lhs->is_iomem)
+ return lhs->vaddr_iomem == rhs->vaddr_iomem;
+ else
+ return lhs->vaddr == rhs->vaddr;
+}
+
+/**
+ * dma_buf_map_is_null - Tests for a dma-buf mapping to be NULL
+ * @map: The dma-buf mapping structure
+ *
+ * Depending on the state of struct dma_buf_map.is_iomem, tests if the
+ * mapping is NULL.
+ *
+ * Returns:
+ * True if the mapping is NULL, or false otherwise.
+ */
+static inline bool dma_buf_map_is_null(const struct dma_buf_map *map)
+{
+ if (map->is_iomem)
+ return !map->vaddr_iomem;
+ return !map->vaddr;
+}
+
+/**
+ * dma_buf_map_is_set - Tests if the dma-buf mapping has been set
+ * @map: The dma-buf mapping structure
+ *
+ * Depending on the state of struct dma_buf_map.is_iomem, tests if the
+ * mapping has been set.
+ *
+ * Returns:
+ * True if the mapping has been set, or false otherwise.
+ */
+static inline bool dma_buf_map_is_set(const struct dma_buf_map *map)
+{
+ return !dma_buf_map_is_null(map);
+}
+
+/**
+ * dma_buf_map_clear - Clears a dma-buf mapping structure
+ * @map: The dma-buf mapping structure
+ *
+ * Clears all fields to zero, including struct dma_buf_map.is_iomem, so
+ * mapping structures that were set to point to I/O memory are reset to
+ * system memory. Pointers are cleared to NULL. This is the default.
+ */
+static inline void dma_buf_map_clear(struct dma_buf_map *map)
+{
+ if (map->is_iomem) {
+ map->vaddr_iomem = NULL;
+ map->is_iomem = false;
+ } else {
+ map->vaddr = NULL;
+ }
+}
+
+#endif /* __DMA_BUF_MAP_H__ */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 957b398d30e5..03875eaed51a 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -13,6 +13,7 @@
#ifndef __DMA_BUF_H__
#define __DMA_BUF_H__
+#include <linux/dma-buf-map.h>
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
@@ -145,7 +146,8 @@ struct dma_buf_ops {
*
 * A &sg_table scatter list of the backing storage of the DMA buffer,
* already mapped into the device address space of the &device attached
- * with the provided &dma_buf_attachment.
+ * with the provided &dma_buf_attachment. The addresses and lengths in
+ * the scatter list are PAGE_SIZE aligned.
*
* On failure, returns a negative error value wrapped into a pointer.
* May also return -EINTR when a signal was received while being
@@ -265,8 +267,8 @@ struct dma_buf_ops {
*/
int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
- void *(*vmap)(struct dma_buf *);
- void (*vunmap)(struct dma_buf *, void *vaddr);
+ int (*vmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
+ void (*vunmap)(struct dma_buf *dmabuf, struct dma_buf_map *map);
};
/**
@@ -309,7 +311,7 @@ struct dma_buf {
const struct dma_buf_ops *ops;
struct mutex lock;
unsigned vmapping_counter;
- void *vmap_ptr;
+ struct dma_buf_map vmap_ptr;
const char *exp_name;
const char *name;
spinlock_t name_lock;
@@ -502,6 +504,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
-void *dma_buf_vmap(struct dma_buf *);
-void dma_buf_vunmap(struct dma_buf *, void *vaddr);
+int dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
+void dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map);
#endif /* __DMA_BUF_H__ */
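For importers, the calling convention changes from testing a returned pointer to testing an error code and then branching on the memory type. A minimal sketch, assuming an already-attached dmabuf and hypothetical dst/len:

	struct dma_buf_map map;
	int ret;

	ret = dma_buf_vmap(dmabuf, &map);
	if (ret)
		return ret;

	if (map.is_iomem)
		memcpy_fromio(dst, map.vaddr_iomem, len); /* I/O memory */
	else
		memcpy(dst, map.vaddr, len);              /* system memory */

	dma_buf_vunmap(dmabuf, &map);
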
diff --git a/include/linux/platform_data/shmob_drm.h b/include/linux/platform_data/shmob_drm.h
index fe815d7d9f58..d661399b217d 100644
--- a/include/linux/platform_data/shmob_drm.h
+++ b/include/linux/platform_data/shmob_drm.h
@@ -10,8 +10,6 @@
#ifndef __SHMOB_DRM_H__
#define __SHMOB_DRM_H__
-#include <linux/kernel.h>
-
#include <drm/drm_mode.h>
enum shmob_drm_clk_source {
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 82f327801267..9f7e19c9416c 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -58,6 +58,30 @@ extern "C" {
* may preserve meaning - such as number of planes - from the fourcc code,
* whereas others may not.
*
+ * Modifiers must uniquely encode buffer layout. In other words, a buffer must
+ * match only a single modifier. A modifier must not be a subset of layouts of
+ * another modifier. For instance, it's incorrect to encode pitch alignment in
+ * a modifier: a buffer may match a 64-pixel aligned modifier and a 32-pixel
+ * aligned modifier. That said, modifiers can have implicit minimal
+ * requirements.
+ *
+ * For modifiers where the combination of fourcc code and modifier can alias,
+ * a canonical pair needs to be defined and used by all drivers. Preferred
+ * combinations are also encouraged where all combinations might lead to
+ * confusion and unnecessarily reduced interoperability. An example for the
+ * latter is AFBC, where the ABGR layouts are preferred over ARGB layouts.
+ *
+ * There are two kinds of modifier users:
+ *
+ * - Kernel and user-space drivers: for drivers it's important that modifiers
+ * don't alias, otherwise two drivers might support the same format but use
+ * different aliases, preventing them from sharing buffers in an efficient
+ * format.
+ * - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
+ * see modifiers as opaque tokens they can check for equality and intersect.
+ * These users mustn't need to reason about the modifier value
+ * (i.e. they are not expected to extract information out of the modifier).
+ *
* Vendors should document their modifier usage in as much detail as
* possible, to ensure maximum compatibility across devices, drivers and
* applications.
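
To make the second kind of user concrete, a sketch of intersecting two advertised modifier lists purely as opaque tokens (the function and its callers are hypothetical):

	/* Intersect the modifiers advertised by two devices; only equality
	 * comparisons are performed, no bits of the value are interpreted. */
	static size_t intersect_modifiers(const uint64_t *a, size_t na,
					  const uint64_t *b, size_t nb,
					  uint64_t *out)
	{
		size_t i, j, n = 0;

		for (i = 0; i < na; i++)
			for (j = 0; j < nb; j++)
				if (a[i] == b[j])
					out[n++] = a[i];
		return n;
	}
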
@@ -155,6 +179,12 @@ extern "C" {
#define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+/*
+ * RGBA format with 10-bit components packed into 64 bits per pixel, with
+ * 6 bits of unused padding per component:
+ */
+#define DRM_FORMAT_AXBXGXRX106106106106 fourcc_code('A', 'B', '1', '0') /* [63:0] A:x:B:x:G:x:R:x 10:6:10:6:10:6:10:6 little endian */
+
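A sketch of that layout: each 10-bit component occupies the top bits of a 16-bit lane, with the low 6 bits left as padding (the helper name is hypothetical):

	static uint64_t pack_axbxgxrx106106106106(uint16_t r, uint16_t g,
						  uint16_t b, uint16_t a)
	{
		/* [63:0] A:x:B:x:G:x:R:x 10:6:10:6:10:6:10:6 little endian */
		return ((uint64_t)(a & 0x3ff) << 54) |
		       ((uint64_t)(b & 0x3ff) << 38) |
		       ((uint64_t)(g & 0x3ff) << 22) |
		       ((uint64_t)(r & 0x3ff) << 6);
	}
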
/* packed YCbCr */
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
@@ -320,7 +350,6 @@ extern "C" {
*/
/* Vendor Ids: */
-#define DRM_FORMAT_MOD_NONE 0
#define DRM_FORMAT_MOD_VENDOR_NONE 0
#define DRM_FORMAT_MOD_VENDOR_INTEL 0x01
#define DRM_FORMAT_MOD_VENDOR_AMD 0x02
@@ -392,6 +421,16 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0)
+/*
+ * Deprecated: use DRM_FORMAT_MOD_LINEAR instead
+ *
+ * The "none" format modifier doesn't actually mean that the modifier is
+ * implicit; instead it means that the layout is linear. Whether modifiers are
+ * used is out-of-band information carried in an API-specific way (e.g. in a
+ * flag for drm_mode_fb_cmd2).
+ */
+#define DRM_FORMAT_MOD_NONE 0
+
/* Intel framebuffer modifiers */
/*
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index f06a789f34cd..b9ec26e9c646 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -46,6 +46,7 @@ extern "C" {
#define DRM_VIRTGPU_TRANSFER_TO_HOST 0x07
#define DRM_VIRTGPU_WAIT 0x08
#define DRM_VIRTGPU_GET_CAPS 0x09
+#define DRM_VIRTGPU_RESOURCE_CREATE_BLOB 0x0a
#define VIRTGPU_EXECBUF_FENCE_FD_IN 0x01
#define VIRTGPU_EXECBUF_FENCE_FD_OUT 0x02
@@ -71,6 +72,9 @@ struct drm_virtgpu_execbuffer {
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
+#define VIRTGPU_PARAM_RESOURCE_BLOB 3 /* DRM_VIRTGPU_RESOURCE_CREATE_BLOB */
+#define VIRTGPU_PARAM_HOST_VISIBLE 4 /* Host blob resources are mappable */
+#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
struct drm_virtgpu_getparam {
__u64 param;
@@ -100,7 +104,7 @@ struct drm_virtgpu_resource_info {
__u32 bo_handle;
__u32 res_handle;
__u32 size;
- __u32 stride;
+ __u32 blob_mem;
};
struct drm_virtgpu_3d_box {
@@ -117,6 +121,8 @@ struct drm_virtgpu_3d_transfer_to_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
struct drm_virtgpu_3d_transfer_from_host {
@@ -124,6 +130,8 @@ struct drm_virtgpu_3d_transfer_from_host {
struct drm_virtgpu_3d_box box;
__u32 level;
__u32 offset;
+ __u32 stride;
+ __u32 layer_stride;
};
#define VIRTGPU_WAIT_NOWAIT 1 /* like it */
@@ -140,6 +148,31 @@ struct drm_virtgpu_get_caps {
__u32 pad;
};
+struct drm_virtgpu_resource_create_blob {
+#define VIRTGPU_BLOB_MEM_GUEST 0x0001
+#define VIRTGPU_BLOB_MEM_HOST3D 0x0002
+#define VIRTGPU_BLOB_MEM_HOST3D_GUEST 0x0003
+
+#define VIRTGPU_BLOB_FLAG_USE_MAPPABLE 0x0001
+#define VIRTGPU_BLOB_FLAG_USE_SHAREABLE 0x0002
+#define VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+ /* zero is invalid blob_mem */
+ __u32 blob_mem;
+ __u32 blob_flags;
+ __u32 bo_handle;
+ __u32 res_handle;
+ __u64 size;
+
+ /*
+ * For 3D contexts with VIRTGPU_BLOB_MEM_HOST3D_GUEST and
+ * VIRTGPU_BLOB_MEM_HOST3D; otherwise, must be zero.
+ */
+ __u32 pad;
+ __u32 cmd_size;
+ __u64 cmd;
+ __u64 blob_id;
+};
+
#define DRM_IOCTL_VIRTGPU_MAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_MAP, struct drm_virtgpu_map)
@@ -175,6 +208,10 @@ struct drm_virtgpu_get_caps {
DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_GET_CAPS, \
struct drm_virtgpu_get_caps)
+#define DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB \
+ DRM_IOWR(DRM_COMMAND_BASE + DRM_VIRTGPU_RESOURCE_CREATE_BLOB, \
+ struct drm_virtgpu_resource_create_blob)
+
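A hedged userspace sketch of the new ioctl, assuming an open virtio-gpu DRM file descriptor and that the driver reported VIRTGPU_PARAM_RESOURCE_BLOB; error handling is elided:

	struct drm_virtgpu_resource_create_blob blob = {
		.blob_mem   = VIRTGPU_BLOB_MEM_GUEST,
		.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
		.size       = 4096,
	};

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &blob) == 0) {
		/* On success, blob.bo_handle and blob.res_handle are
		 * expected to be filled in by the driver. */
	}
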
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/linux/virtio_gpu.h b/include/uapi/linux/virtio_gpu.h
index 747a5c5cc4e6..0ec6b610402c 100644
--- a/include/uapi/linux/virtio_gpu.h
+++ b/include/uapi/linux/virtio_gpu.h
@@ -55,6 +55,11 @@
*/
#define VIRTIO_GPU_F_RESOURCE_UUID 2
+/*
+ * VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB
+ */
+#define VIRTIO_GPU_F_RESOURCE_BLOB 3
+
enum virtio_gpu_ctrl_type {
VIRTIO_GPU_UNDEFINED = 0,
@@ -71,6 +76,8 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_CMD_GET_CAPSET,
VIRTIO_GPU_CMD_GET_EDID,
VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID,
+ VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB,
+ VIRTIO_GPU_CMD_SET_SCANOUT_BLOB,
/* 3d commands */
VIRTIO_GPU_CMD_CTX_CREATE = 0x0200,
@@ -81,6 +88,8 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D,
VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D,
VIRTIO_GPU_CMD_SUBMIT_3D,
+ VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB,
+ VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB,
/* cursor commands */
VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300,
@@ -93,6 +102,7 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_OK_CAPSET,
VIRTIO_GPU_RESP_OK_EDID,
VIRTIO_GPU_RESP_OK_RESOURCE_UUID,
+ VIRTIO_GPU_RESP_OK_MAP_INFO,
/* error responses */
VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200,
@@ -103,6 +113,11 @@ enum virtio_gpu_ctrl_type {
VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER,
};
+enum virtio_gpu_shm_id {
+ VIRTIO_GPU_SHM_ID_UNDEFINED = 0,
+ VIRTIO_GPU_SHM_ID_HOST_VISIBLE = 1
+};
+
#define VIRTIO_GPU_FLAG_FENCE (1 << 0)
struct virtio_gpu_ctrl_hdr {
@@ -359,4 +374,67 @@ struct virtio_gpu_resp_resource_uuid {
__u8 uuid[16];
};
+/* VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB */
+struct virtio_gpu_resource_create_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+#define VIRTIO_GPU_BLOB_MEM_GUEST 0x0001
+#define VIRTIO_GPU_BLOB_MEM_HOST3D 0x0002
+#define VIRTIO_GPU_BLOB_MEM_HOST3D_GUEST 0x0003
+
+#define VIRTIO_GPU_BLOB_FLAG_USE_MAPPABLE 0x0001
+#define VIRTIO_GPU_BLOB_FLAG_USE_SHAREABLE 0x0002
+#define VIRTIO_GPU_BLOB_FLAG_USE_CROSS_DEVICE 0x0004
+ /* zero is invalid blob mem */
+ __le32 blob_mem;
+ __le32 blob_flags;
+ __le32 nr_entries;
+ __le64 blob_id;
+ __le64 size;
+ /*
+ * nr_entries * sizeof(struct virtio_gpu_mem_entry) bytes follow
+ */
+};
+
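Since the mem entries are appended after the fixed header, an implementation would size the command buffer along these lines (sketch; cmd is a hypothetical pointer to the command):

	size_t total = sizeof(struct virtio_gpu_resource_create_blob) +
		       le32_to_cpu(cmd->nr_entries) *
		       sizeof(struct virtio_gpu_mem_entry);
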
+/* VIRTIO_GPU_CMD_SET_SCANOUT_BLOB */
+struct virtio_gpu_set_scanout_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ struct virtio_gpu_rect r;
+ __le32 scanout_id;
+ __le32 resource_id;
+ __le32 width;
+ __le32 height;
+ __le32 format;
+ __le32 padding;
+ __le32 strides[4];
+ __le32 offsets[4];
+};
+
+/* VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB */
+struct virtio_gpu_resource_map_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+ __le64 offset;
+};
+
+/* VIRTIO_GPU_RESP_OK_MAP_INFO */
+#define VIRTIO_GPU_MAP_CACHE_MASK 0x0f
+#define VIRTIO_GPU_MAP_CACHE_NONE 0x00
+#define VIRTIO_GPU_MAP_CACHE_CACHED 0x01
+#define VIRTIO_GPU_MAP_CACHE_UNCACHED 0x02
+#define VIRTIO_GPU_MAP_CACHE_WC 0x03
+struct virtio_gpu_resp_map_info {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __u32 map_info;
+ __u32 padding;
+};
+
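A guest decoding the caching attribute would mask the low bits, roughly as follows (sketch; resp is a hypothetical pointer to the response):

	switch (resp->map_info & VIRTIO_GPU_MAP_CACHE_MASK) {
	case VIRTIO_GPU_MAP_CACHE_CACHED:
		/* map with cacheable attributes */
		break;
	case VIRTIO_GPU_MAP_CACHE_WC:
		/* map write-combined */
		break;
	case VIRTIO_GPU_MAP_CACHE_UNCACHED:
	default:
		/* map uncached */
		break;
	}
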
+/* VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB */
+struct virtio_gpu_resource_unmap_blob {
+ struct virtio_gpu_ctrl_hdr hdr;
+ __le32 resource_id;
+ __le32 padding;
+};
+
#endif