Diffstat (limited to 'drivers')
-rw-r--r--drivers/accel/amdxdna/aie2_ctx.c5
-rw-r--r--drivers/accel/amdxdna/aie2_psp.c4
-rw-r--r--drivers/accel/amdxdna/amdxdna_gem.c193
-rw-r--r--drivers/accel/amdxdna/amdxdna_gem.h3
-rw-r--r--drivers/accel/habanalabs/common/device.c25
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c1
-rw-r--r--drivers/accel/ivpu/ivpu_drv.h15
-rw-r--r--drivers/accel/ivpu/ivpu_hw.c4
-rw-r--r--drivers/accel/ivpu/ivpu_hw_ip.c1
-rw-r--r--drivers/accel/ivpu/ivpu_job.c81
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c15
-rw-r--r--drivers/accel/qaic/Makefile1
-rw-r--r--drivers/accel/qaic/qaic.h10
-rw-r--r--drivers/accel/qaic/qaic_data.c1
-rw-r--r--drivers/accel/qaic/qaic_drv.c6
-rw-r--r--drivers/accel/qaic/qaic_ras.c642
-rw-r--r--drivers/accel/qaic/qaic_ras.h10
-rw-r--r--drivers/acpi/acpi_pad.c2
-rw-r--r--drivers/acpi/apei/einj-core.c9
-rw-r--r--drivers/acpi/cppc_acpi.c2
-rw-r--r--drivers/acpi/ec.c17
-rw-r--r--drivers/acpi/internal.h6
-rw-r--r--drivers/acpi/processor_driver.c3
-rw-r--r--drivers/acpi/processor_idle.c8
-rw-r--r--drivers/acpi/resource.c7
-rw-r--r--drivers/base/faux.c3
-rw-r--r--drivers/block/loop.c11
-rw-r--r--drivers/bluetooth/btintel_pcie.c31
-rw-r--r--drivers/bluetooth/btintel_pcie.h10
-rw-r--r--drivers/cpufreq/rcpufreq_dt.rs4
-rw-r--r--drivers/dma-buf/Kconfig1
-rw-r--r--drivers/dma-buf/dma-buf.c2
-rw-r--r--drivers/dma-buf/dma-fence-chain.c7
-rw-r--r--drivers/dma-buf/dma-fence.c167
-rw-r--r--drivers/dma-buf/sw_sync.c2
-rw-r--r--drivers/dma-buf/sync_file.c24
-rw-r--r--drivers/dma-buf/udmabuf.c28
-rw-r--r--drivers/gpu/drm/Kconfig.debug1
-rw-r--r--drivers/gpu/drm/adp/adp-mipi.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c8
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c23
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c2
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c69
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig5
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h53
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c77
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c57
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c357
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c9
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c9
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c2
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c1
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c7
-rw-r--r--drivers/gpu/drm/bridge/aux-bridge.c9
-rw-r--r--drivers/gpu/drm/bridge/aux-hpd-bridge.c10
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c72
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c8
-rw-r--r--drivers/gpu/drm/bridge/chipone-icn6211.c8
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c8
-rw-r--r--drivers/gpu/drm/bridge/cros-ec-anx7688.c8
-rw-r--r--drivers/gpu/drm/bridge/fsl-ldb.c7
-rw-r--r--drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c10
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c8
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c27
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c8
-rw-r--r--drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c8
-rw-r--r--drivers/gpu/drm/bridge/ite-it6263.c8
-rw-r--r--drivers/gpu/drm/bridge/ite-it6505.c8
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c8
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c8
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9211.c7
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611.c8
-rw-r--r--drivers/gpu/drm/bridge/lvds-codec.c9
-rw-r--r--drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c11
-rw-r--r--drivers/gpu/drm/bridge/microchip-lvds.c8
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c8
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c9
-rw-r--r--drivers/gpu/drm/bridge/panel.c13
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c8
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c8
-rw-r--r--drivers/gpu/drm/bridge/samsung-dsim.c1
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c7
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c8
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c8
-rw-r--r--drivers/gpu/drm/bridge/simple-bridge.c8
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c9
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c1
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c9
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c9
-rw-r--r--drivers/gpu/drm/bridge/tc358762.c8
-rw-r--r--drivers/gpu/drm/bridge/tc358764.c8
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c8
-rw-r--r--drivers/gpu/drm/bridge/tc358775.c8
-rw-r--r--drivers/gpu/drm/bridge/thc63lvd1024.c8
-rw-r--r--drivers/gpu/drm/bridge/ti-dlpc3433.c8
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c67
-rw-r--r--drivers/gpu/drm/bridge/ti-tdp158.c8
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c8
-rw-r--r--drivers/gpu/drm/bridge/ti-tpd12s015.c8
-rw-r--r--drivers/gpu/drm/clients/drm_client_setup.c2
-rw-r--r--drivers/gpu/drm/display/Kconfig13
-rw-r--r--drivers/gpu/drm/display/Makefile4
-rw-r--r--drivers/gpu/drm/display/drm_bridge_connector.c108
-rw-r--r--drivers/gpu/drm/display/drm_dp_aux_bus.c1
-rw-r--r--drivers/gpu/drm/display/drm_dp_cec.c1
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c1
-rw-r--r--drivers/gpu/drm/display/drm_dp_mst_topology.c1
-rw-r--r--drivers/gpu/drm/display/drm_dp_tunnel.c1
-rw-r--r--drivers/gpu/drm/display/drm_dsc_helper.c1
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_audio_helper.c4
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_cec_helper.c193
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c65
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_helper.c1
-rw-r--r--drivers/gpu/drm/display/drm_hdmi_state_helper.c129
-rw-r--r--drivers/gpu/drm/display/drm_scdc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_atomic.c2
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c161
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c1
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c1
-rw-r--r--drivers/gpu/drm/drm_auth.c65
-rw-r--r--drivers/gpu/drm/drm_bridge.c20
-rw-r--r--drivers/gpu/drm/drm_bridge_helper.c2
-rw-r--r--drivers/gpu/drm/drm_buddy.c1
-rw-r--r--drivers/gpu/drm/drm_client.c36
-rw-r--r--drivers/gpu/drm/drm_client_event.c1
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c2
-rw-r--r--drivers/gpu/drm/drm_color_mgmt.c210
-rw-r--r--drivers/gpu/drm/drm_connector.c42
-rw-r--r--drivers/gpu/drm/drm_damage_helper.c2
-rw-r--r--drivers/gpu/drm/drm_debugfs.c10
-rw-r--r--drivers/gpu/drm/drm_debugfs_crc.c1
-rw-r--r--drivers/gpu/drm/drm_drv.c22
-rw-r--r--drivers/gpu/drm/drm_edid.c1
-rw-r--r--drivers/gpu/drm/drm_exec.c2
-rw-r--r--drivers/gpu/drm/drm_fb_dma_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c1
-rw-r--r--drivers/gpu/drm/drm_fbdev_dma.c1
-rw-r--r--drivers/gpu/drm/drm_fbdev_shmem.c1
-rw-r--r--drivers/gpu/drm/drm_fbdev_ttm.c1
-rw-r--r--drivers/gpu/drm/drm_file.c7
-rw-r--r--drivers/gpu/drm/drm_flip_work.c1
-rw-r--r--drivers/gpu/drm/drm_format_helper.c247
-rw-r--r--drivers/gpu/drm/drm_format_internal.h10
-rw-r--r--drivers/gpu/drm/drm_fourcc.c27
-rw-r--r--drivers/gpu/drm/drm_gem.c33
-rw-r--r--drivers/gpu/drm/drm_gem_atomic_helper.c1
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c6
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c61
-rw-r--r--drivers/gpu/drm/drm_gem_ttm_helper.c1
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c80
-rw-r--r--drivers/gpu/drm/drm_gpusvm.c35
-rw-r--r--drivers/gpu/drm/drm_gpuvm.c1
-rw-r--r--drivers/gpu/drm/drm_internal.h4
-rw-r--r--drivers/gpu/drm/drm_managed.c1
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c1
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c1
-rw-r--r--drivers/gpu/drm/drm_mode_config.c1
-rw-r--r--drivers/gpu/drm/drm_modeset_helper.c2
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c2
-rw-r--r--drivers/gpu/drm/drm_panel.c52
-rw-r--r--drivers/gpu/drm/drm_panel_backlight_quirks.c1
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c1
-rw-r--r--drivers/gpu/drm/drm_panic.c1
-rw-r--r--drivers/gpu/drm/drm_pci.c1
-rw-r--r--drivers/gpu/drm/drm_plane.c1
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c1
-rw-r--r--drivers/gpu/drm/drm_prime.c58
-rw-r--r--drivers/gpu/drm/drm_print.c1
-rw-r--r--drivers/gpu/drm/drm_privacy_screen.c1
-rw-r--r--drivers/gpu/drm/drm_self_refresh_helper.c1
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c1
-rw-r--r--drivers/gpu/drm/drm_suballoc.c2
-rw-r--r--drivers/gpu/drm/drm_syncobj.c1
-rw-r--r--drivers/gpu/drm/drm_vblank_work.c2
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c1
-rw-r--r--drivers/gpu/drm/drm_writeback.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c2
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm.h4
-rw-r--r--drivers/gpu/drm/hyperv/hyperv_drm_modeset.c206
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c131
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_requests.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c3
-rw-r--r--drivers/gpu/drm/i915/i915_request.c7
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c10
-rw-r--r--drivers/gpu/drm/imagination/pvr_job.c2
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c5
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.h2
-rw-r--r--drivers/gpu/drm/imx/Kconfig1
-rw-r--r--drivers/gpu/drm/imx/Makefile1
-rw-r--r--drivers/gpu/drm/imx/dc/Kconfig13
-rw-r--r--drivers/gpu/drm/imx/dc/Makefile7
-rw-r--r--drivers/gpu/drm/imx/dc/dc-cf.c172
-rw-r--r--drivers/gpu/drm/imx/dc/dc-crtc.c555
-rw-r--r--drivers/gpu/drm/imx/dc/dc-de.c177
-rw-r--r--drivers/gpu/drm/imx/dc/dc-de.h59
-rw-r--r--drivers/gpu/drm/imx/dc/dc-drv.c293
-rw-r--r--drivers/gpu/drm/imx/dc/dc-drv.h102
-rw-r--r--drivers/gpu/drm/imx/dc/dc-ed.c288
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fg.c376
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fl.c185
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fu.c258
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fu.h129
-rw-r--r--drivers/gpu/drm/imx/dc/dc-fw.c222
-rw-r--r--drivers/gpu/drm/imx/dc/dc-ic.c282
-rw-r--r--drivers/gpu/drm/imx/dc/dc-kms.c143
-rw-r--r--drivers/gpu/drm/imx/dc/dc-kms.h131
-rw-r--r--drivers/gpu/drm/imx/dc/dc-lb.c325
-rw-r--r--drivers/gpu/drm/imx/dc/dc-pe.c158
-rw-r--r--drivers/gpu/drm/imx/dc/dc-pe.h101
-rw-r--r--drivers/gpu/drm/imx/dc/dc-plane.c224
-rw-r--r--drivers/gpu/drm/imx/dc/dc-tc.c141
-rw-r--r--drivers/gpu/drm/lib/drm_random.c1
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c2
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c6
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h3
-rw-r--r--drivers/gpu/drm/lima/lima_trace.h6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dp.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c8
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_cvbs.c10
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_dsi.c10
-rw-r--r--drivers/gpu/drm/meson/meson_encoder_hdmi.c12
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c55
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h8
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200er.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200ev.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_g200se.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c82
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c72
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sched.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c7
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c26
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c26
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c25
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c23
-rw-r--r--drivers/gpu/drm/panel/Kconfig26
-rw-r--r--drivers/gpu/drm/panel/Makefile2
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c6
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c10
-rw-r--r--drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c11
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c10
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx83112a.c10
-rw-r--r--drivers/gpu/drm/panel/panel-himax-hx8394.c153
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9341.c11
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9805.c12
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9806e.c10
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c236
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-ej030na.c11
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c11
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c10
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c10
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c11
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c10
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lincolntech-lcd197.c11
-rw-r--r--drivers/gpu/drm/panel/panel-lvds.c12
-rw-r--r--drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c11
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c11
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c10
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3051d.c11
-rw-r--r--drivers/gpu/drm/panel/panel-newvision-nv3052c.c10
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c12
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35560.c12
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35950.c10
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36523.c9
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt36672e.c10
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c10
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c11
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-ota5601a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c11
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c10
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67200.c11
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c11
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm692e5.c10
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm69380.c10
-rw-r--r--drivers/gpu/drm/panel/panel-renesas-r61307.c325
-rw-r--r--drivers/gpu/drm/panel/panel-renesas-r69328.c281
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ams581vf01.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ams639rq08.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-atna33xc20.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-db7430.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d27a1.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c1
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c11
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c10
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-sofef00.c11
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c11
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c10
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c11
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c27
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c132
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7703.c11
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c10
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c10
-rw-r--r--drivers/gpu/drm/panel/panel-sony-td4353-jdi.c11
-rw-r--r--drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c11
-rw-r--r--drivers/gpu/drm/panel/panel-summit.c10
-rw-r--r--drivers/gpu/drm/panel/panel-synaptics-r63353.c11
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c11
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c10
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c11
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-r66451.c9
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c257
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm692e5.c10
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-vtdr6130.c11
-rw-r--r--drivers/gpu/drm/panel/panel-widechips-ws2401.c11
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c10
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c5
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h15
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c81
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c186
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h66
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c2
-rw-r--r--drivers/gpu/drm/panthor/panthor_device.h89
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c63
-rw-r--r--drivers/gpu/drm/panthor/panthor_fw.c9
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.c159
-rw-r--r--drivers/gpu/drm/panthor/panthor_gpu.h12
-rw-r--r--drivers/gpu/drm/panthor/panthor_mmu.c36
-rw-r--r--drivers/gpu/drm/panthor/panthor_regs.h100
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c5
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.h3
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h2
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c8
-rw-r--r--drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c8
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c11
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c44
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c355
-rw-r--r--drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h56
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c9
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h103
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c26
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c6
-rw-r--r--drivers/gpu/drm/scheduler/sched_internal.h2
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c37
-rw-r--r--drivers/gpu/drm/scheduler/tests/mock_scheduler.c8
-rw-r--r--drivers/gpu/drm/scheduler/tests/sched_tests.h1
-rw-r--r--drivers/gpu/drm/sitronix/Kconfig11
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c29
-rw-r--r--drivers/gpu/drm/stm/lvds.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c168
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h31
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c27
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_scaler.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c14
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_scaler.c6
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_helper.h4
-rw-r--r--drivers/gpu/drm/sysfb/drm_sysfb_modeset.c138
-rw-r--r--drivers/gpu/drm/sysfb/efidrm.c6
-rw-r--r--drivers/gpu/drm/sysfb/ofdrm.c85
-rw-r--r--drivers/gpu/drm/sysfb/simpledrm.c5
-rw-r--r--drivers/gpu/drm/sysfb/vesadrm.c106
-rw-r--r--drivers/gpu/drm/tests/Makefile3
-rw-r--r--drivers/gpu/drm/tests/drm_bridge_test.c176
-rw-r--r--drivers/gpu/drm/tests/drm_format_helper_test.c262
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c651
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_edid.h232
-rw-r--r--drivers/gpu/drm/tests/drm_kunit_helpers.c1
-rw-r--r--drivers/gpu/drm/tests/drm_sysfb_modeset_test.c168
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c4
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c195
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h13
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c1
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c2
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c2
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c3
-rw-r--r--drivers/gpu/drm/ttm/tests/ttm_mock_manager.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_backup.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_internal.h58
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c31
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c1
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c2
-rw-r--r--drivers/gpu/drm/v3d/v3d_submit.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c3
-rw-r--r--drivers/gpu/drm/vkms/tests/Makefile6
-rw-r--r--drivers/gpu/drm/vkms/tests/vkms_format_test.c280
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h35
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.c467
-rw-r--r--drivers/gpu/drm/vkms/vkms_formats.h9
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c22
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h19
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c510
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h21
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c61
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c2
-rw-r--r--drivers/gpu/drm/xe/Kconfig6
-rw-r--r--drivers/gpu/drm/xe/Kconfig.debug11
-rw-r--r--drivers/gpu/drm/xe/Makefile3
-rw-r--r--drivers/gpu/drm/xe/abi/guc_actions_abi.h31
-rw-r--r--drivers/gpu/drm/xe/abi/guc_errors_abi.h14
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c50
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c6
-rw-r--r--drivers/gpu/drm/xe/regs/xe_mchbar_regs.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_oa_regs.h3
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pcode_regs.h2
-rw-r--r--drivers/gpu/drm/xe/regs/xe_pmt.h5
-rw-r--r--drivers/gpu/drm/xe/tests/xe_bo.c4
-rw-r--r--drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c11
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci.c63
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.c24
-rw-r--r--drivers/gpu/drm/xe/tests/xe_pci_test.h11
-rw-r--r--drivers/gpu/drm/xe/xe_bo.c69
-rw-r--r--drivers/gpu/drm/xe/xe_bo_evict.c4
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.c160
-rw-r--r--drivers/gpu/drm/xe/xe_configfs.h11
-rw-r--r--drivers/gpu/drm/xe/xe_debugfs.c38
-rw-r--r--drivers/gpu/drm/xe/xe_device.c24
-rw-r--r--drivers/gpu/drm/xe/xe_device.h2
-rw-r--r--drivers/gpu/drm/xe/xe_device_types.h7
-rw-r--r--drivers/gpu/drm/xe/xe_drv.h2
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.c245
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt.h24
-rw-r--r--drivers/gpu/drm/xe/xe_ggtt_types.h4
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_debugfs.c96
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c106
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c6
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c4
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.c285
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf.h9
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h21
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c32
-rw-r--r--drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h3
-rw-r--r--drivers/gpu/drm/xe/xe_gt_topology.c44
-rw-r--r--drivers/gpu/drm/xe/xe_guc.c19
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ads.c27
-rw-r--r--drivers/gpu/drm/xe/xe_guc_buf.c4
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c330
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.h7
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct_types.h15
-rw-r--r--drivers/gpu/drm/xe/xe_guc_engine_activity.c2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_exec_queue_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.h2
-rw-r--r--drivers/gpu/drm/xe/xe_guc_pc.c17
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c32
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c37
-rw-r--r--drivers/gpu/drm/xe/xe_hw_fence.c5
-rw-r--r--drivers/gpu/drm/xe/xe_hwmon.c264
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c99
-rw-r--r--drivers/gpu/drm/xe/xe_lrc_types.h3
-rw-r--r--drivers/gpu/drm/xe/xe_map.h18
-rw-r--r--drivers/gpu/drm/xe/xe_module.c11
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c214
-rw-r--r--drivers/gpu/drm/xe/xe_oa_types.h6
-rw-r--r--drivers/gpu/drm/xe/xe_pat.c44
-rw-r--r--drivers/gpu/drm/xe/xe_pci.c1
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c135
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.c147
-rw-r--r--drivers/gpu/drm/xe/xe_query.c4
-rw-r--r--drivers/gpu/drm/xe/xe_sched_job.c17
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.c37
-rw-r--r--drivers/gpu/drm/xe/xe_shrinker.h4
-rw-r--r--drivers/gpu/drm/xe/xe_sriov_vf.c58
-rw-r--r--drivers/gpu/drm/xe/xe_svm.c308
-rw-r--r--drivers/gpu/drm/xe/xe_svm.h138
-rw-r--r--drivers/gpu/drm/xe/xe_tile.c6
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.c254
-rw-r--r--drivers/gpu/drm/xe/xe_tile_sriov_vf.h18
-rw-r--r--drivers/gpu/drm/xe/xe_tuning.c5
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c56
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw_types.h2
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c381
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h24
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h26
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.c4
-rw-r--r--drivers/gpu/drm/xe/xe_vsec.h4
-rw-r--r--drivers/gpu/drm/xe/xe_wa.c46
-rw-r--r--drivers/gpu/drm/xe/xe_wa_oob.rules11
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dp.c31
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_dpsub.c1
-rw-r--r--drivers/idle/intel_idle.c12
-rw-r--r--drivers/iommu/tegra-smmu.c4
-rw-r--r--drivers/net/dsa/b53/b53_common.c6
-rw-r--r--drivers/net/ethernet/airoha/airoha_regs.h3
-rw-r--r--drivers/net/ethernet/freescale/enetc/Kconfig6
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c11
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c11
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h1
-rw-r--r--drivers/net/macsec.c40
-rw-r--r--drivers/net/netconsole.c3
-rw-r--r--drivers/net/netdevsim/netdev.c3
-rw-r--r--drivers/net/phy/mdio_bus.c12
-rw-r--r--drivers/net/phy/phy_caps.c18
-rw-r--r--drivers/net/usb/r8152.c1
-rw-r--r--drivers/net/veth.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c33
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c4
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c29
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c148
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h10
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c127
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c49
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c10
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c6
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h4
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c24
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c6
-rw-r--r--drivers/nvme/host/ioctl.c21
-rw-r--r--drivers/pinctrl/pinctrl-st.c5
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8064.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8084.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5018.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5332.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq5424.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq6018.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8064.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq8074.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq9574.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9607.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-mdm9615.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c11
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8226.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8660.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8909.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8916.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8917.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8953.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8960.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8976.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8994.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8996.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8998.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm8x74.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcm2290.c10
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs404.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs615.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qcs8300.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdf2xxx.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-qdu1000.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sa8775p.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sar2130p.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7180.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8180x.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc8280xp.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm660.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm670.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdm845.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx55.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx65.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx75.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm4450.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6115.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6125.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6350.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm6375.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm7150.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8150.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8250.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8350.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8550.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8650.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8750.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-x1e80100.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c8
-rw-r--r--drivers/ptp/ptp_private.h12
-rw-r--r--drivers/rapidio/rio_cm.c3
-rw-r--r--drivers/regulator/max20086-regulator.c6
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c2
-rw-r--r--drivers/scsi/mvsas/mv_defs.h4
-rw-r--r--drivers/scsi/scsi_error.c3
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c11
-rw-r--r--drivers/scsi/storvsc_drv.c10
-rw-r--r--drivers/spi/spi-loongson-core.c1
-rw-r--r--drivers/spi/spi-offload.c2
-rw-r--r--drivers/spi/spi-omap2-mcspi.c30
-rw-r--r--drivers/spi/spi-pci1xxxx.c4
-rw-r--r--drivers/spi/spi-stm32-ospi.c24
-rw-r--r--drivers/ufs/core/ufshcd.c7
-rw-r--r--drivers/video/Kconfig18
-rw-r--r--drivers/video/fbdev/c2p_iplan2.c1
-rw-r--r--drivers/video/fbdev/c2p_planar.c1
-rw-r--r--drivers/video/fbdev/core/Kconfig15
-rw-r--r--drivers/video/fbdev/core/cfbcopyarea.c2
-rw-r--r--drivers/video/fbdev/core/cfbfillrect.c2
-rw-r--r--drivers/video/fbdev/core/cfbimgblt.c2
-rw-r--r--drivers/video/fbdev/core/fb_ddc.c1
-rw-r--r--drivers/video/fbdev/core/fb_defio.c1
-rw-r--r--drivers/video/fbdev/core/fb_io_fops.c1
-rw-r--r--drivers/video/fbdev/core/fb_sys_fops.c2
-rw-r--r--drivers/video/fbdev/core/fbcmap.c1
-rw-r--r--drivers/video/fbdev/core/fbcon.c1
-rw-r--r--drivers/video/fbdev/core/fbmon.c5
-rw-r--r--drivers/video/fbdev/core/modedb.c1
-rw-r--r--drivers/video/fbdev/core/svgalib.c1
-rw-r--r--drivers/video/fbdev/core/syscopyarea.c2
-rw-r--r--drivers/video/fbdev/core/sysfillrect.c2
-rw-r--r--drivers/video/fbdev/core/sysimgblt.c2
-rw-r--r--drivers/video/fbdev/cyber2000fb.c36
-rw-r--r--drivers/video/fbdev/cyber2000fb.h2
-rw-r--r--drivers/video/fbdev/macmodes.c3
-rw-r--r--drivers/video/fbdev/matrox/g450_pll.c26
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_DAC1064.c47
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_Ti3026.c1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_accel.c2
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c1
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_g450.c62
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_misc.c21
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xx-i2c.c1
-rw-r--r--drivers/video/fbdev/omap/lcd_dma.c1
-rw-r--r--drivers/video/fbdev/omap/lcdc.c2
-rw-r--r--drivers/video/fbdev/omap/omapfb_main.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/apply.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dpi.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss_features.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/output.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/sdi.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c1
-rw-r--r--drivers/video/fbdev/omap2/omapfb/vrfb.c1
-rw-r--r--drivers/video/fbdev/pxafb.c17
-rw-r--r--drivers/video/fbdev/sbuslib.c1
-rw-r--r--drivers/video/fbdev/sis/sis.h2
-rw-r--r--drivers/video/fbdev/sis/sis_main.c25
-rw-r--r--drivers/video/fbdev/via/via-core.c1
-rw-r--r--drivers/video/fbdev/via/via-gpio.c1
-rw-r--r--drivers/video/fbdev/via/via_i2c.c1
-rw-r--r--drivers/video/fbdev/wmt_ge_rops.c1
704 files changed, 17332 insertions, 5673 deletions
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index e04549f64d69..f20999f2d668 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -566,7 +566,7 @@ int aie2_hwctx_init(struct amdxdna_hwctx *hwctx)
.size = MAX_CHAIN_CMDBUF_SIZE,
};
- abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp, true);
+ abo = amdxdna_drm_alloc_dev_bo(&xdna->ddev, &args, client->filp);
if (IS_ERR(abo)) {
ret = PTR_ERR(abo);
goto free_cmd_bufs;
@@ -848,7 +848,8 @@ int aie2_cmd_submit(struct amdxdna_hwctx *hwctx, struct amdxdna_sched_job *job,
goto up_sem;
}
- ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx);
+ ret = drm_sched_job_init(&job->base, &hwctx->priv->entity, 1, hwctx,
+ hwctx->client->filp->client_id);
if (ret) {
XDNA_ERR(xdna, "DRM job init failed, ret %d", ret);
goto free_chain;
diff --git a/drivers/accel/amdxdna/aie2_psp.c b/drivers/accel/amdxdna/aie2_psp.c
index dc3a072ce3b6..f28a060a8810 100644
--- a/drivers/accel/amdxdna/aie2_psp.c
+++ b/drivers/accel/amdxdna/aie2_psp.c
@@ -126,8 +126,8 @@ struct psp_device *aie2m_psp_create(struct drm_device *ddev, struct psp_config *
psp->ddev = ddev;
memcpy(psp->psp_regs, conf->psp_regs, sizeof(psp->psp_regs));
- psp->fw_buf_sz = ALIGN(conf->fw_size, PSP_FW_ALIGN) + PSP_FW_ALIGN;
- psp->fw_buffer = drmm_kmalloc(ddev, psp->fw_buf_sz, GFP_KERNEL);
+ psp->fw_buf_sz = ALIGN(conf->fw_size, PSP_FW_ALIGN);
+ psp->fw_buffer = drmm_kmalloc(ddev, psp->fw_buf_sz + PSP_FW_ALIGN, GFP_KERNEL);
if (!psp->fw_buffer) {
drm_err(ddev, "no memory for fw buffer");
return NULL;
diff --git a/drivers/accel/amdxdna/amdxdna_gem.c b/drivers/accel/amdxdna/amdxdna_gem.c
index 26831ec69f89..0f85a0105178 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.c
+++ b/drivers/accel/amdxdna/amdxdna_gem.c
@@ -24,40 +24,79 @@
MODULE_IMPORT_NS("DMA_BUF");
static int
-amdxdna_gem_insert_node_locked(struct amdxdna_gem_obj *abo, bool use_vmap)
+amdxdna_gem_heap_alloc(struct amdxdna_gem_obj *abo)
{
struct amdxdna_client *client = abo->client;
struct amdxdna_dev *xdna = client->xdna;
struct amdxdna_mem *mem = &abo->mem;
+ struct amdxdna_gem_obj *heap;
u64 offset;
u32 align;
int ret;
+ mutex_lock(&client->mm_lock);
+
+ heap = client->dev_heap;
+ if (!heap) {
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
+ XDNA_ERR(xdna, "Invalid dev heap userptr");
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
+ if (mem->size == 0 || mem->size > heap->mem.size) {
+ XDNA_ERR(xdna, "Invalid dev bo size 0x%lx, limit 0x%lx",
+ mem->size, heap->mem.size);
+ ret = -EINVAL;
+ goto unlock_out;
+ }
+
align = 1 << max(PAGE_SHIFT, xdna->dev_info->dev_mem_buf_shift);
- ret = drm_mm_insert_node_generic(&abo->dev_heap->mm, &abo->mm_node,
+ ret = drm_mm_insert_node_generic(&heap->mm, &abo->mm_node,
mem->size, align,
0, DRM_MM_INSERT_BEST);
if (ret) {
XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
- return ret;
+ goto unlock_out;
}
mem->dev_addr = abo->mm_node.start;
- offset = mem->dev_addr - abo->dev_heap->mem.dev_addr;
- mem->userptr = abo->dev_heap->mem.userptr + offset;
- mem->pages = &abo->dev_heap->base.pages[offset >> PAGE_SHIFT];
- mem->nr_pages = mem->size >> PAGE_SHIFT;
-
- if (use_vmap) {
- mem->kva = vmap(mem->pages, mem->nr_pages, VM_MAP, PAGE_KERNEL);
- if (!mem->kva) {
- XDNA_ERR(xdna, "Failed to vmap");
- drm_mm_remove_node(&abo->mm_node);
- return -EFAULT;
- }
- }
+ offset = mem->dev_addr - heap->mem.dev_addr;
+ mem->userptr = heap->mem.userptr + offset;
+ mem->kva = heap->mem.kva + offset;
- return 0;
+ drm_gem_object_get(to_gobj(heap));
+
+unlock_out:
+ mutex_unlock(&client->mm_lock);
+
+ return ret;
+}
+
+static void
+amdxdna_gem_destroy_obj(struct amdxdna_gem_obj *abo)
+{
+ mutex_destroy(&abo->lock);
+ kfree(abo);
+}
+
+static void
+amdxdna_gem_heap_free(struct amdxdna_gem_obj *abo)
+{
+ struct amdxdna_gem_obj *heap;
+
+ mutex_lock(&abo->client->mm_lock);
+
+ drm_mm_remove_node(&abo->mm_node);
+
+ heap = abo->client->dev_heap;
+ drm_gem_object_put(to_gobj(heap));
+
+ mutex_unlock(&abo->client->mm_lock);
}
static bool amdxdna_hmm_invalidate(struct mmu_interval_notifier *mni,
@@ -213,6 +252,20 @@ free_map:
return ret;
}
+static void amdxdna_gem_dev_obj_free(struct drm_gem_object *gobj)
+{
+ struct amdxdna_dev *xdna = to_xdna_dev(gobj->dev);
+ struct amdxdna_gem_obj *abo = to_xdna_obj(gobj);
+
+ XDNA_DBG(xdna, "BO type %d xdna_addr 0x%llx", abo->type, abo->mem.dev_addr);
+ if (abo->pinned)
+ amdxdna_gem_unpin(abo);
+
+ amdxdna_gem_heap_free(abo);
+ drm_gem_object_release(gobj);
+ amdxdna_gem_destroy_obj(abo);
+}
+
static int amdxdna_insert_pages(struct amdxdna_gem_obj *abo,
struct vm_area_struct *vma)
{
@@ -374,19 +427,6 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
if (abo->pinned)
amdxdna_gem_unpin(abo);
- if (abo->type == AMDXDNA_BO_DEV) {
- mutex_lock(&abo->client->mm_lock);
- drm_mm_remove_node(&abo->mm_node);
- mutex_unlock(&abo->client->mm_lock);
-
- vunmap(abo->mem.kva);
- drm_gem_object_put(to_gobj(abo->dev_heap));
- drm_gem_object_release(gobj);
- mutex_destroy(&abo->lock);
- kfree(abo);
- return;
- }
-
if (abo->type == AMDXDNA_BO_DEV_HEAP)
drm_mm_takedown(&abo->mm);
@@ -402,7 +442,7 @@ static void amdxdna_gem_obj_free(struct drm_gem_object *gobj)
}
static const struct drm_gem_object_funcs amdxdna_gem_dev_obj_funcs = {
- .free = amdxdna_gem_obj_free,
+ .free = amdxdna_gem_dev_obj_free,
};
static const struct drm_gem_object_funcs amdxdna_gem_shmem_funcs = {
@@ -527,6 +567,7 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
@@ -553,18 +594,26 @@ amdxdna_drm_create_dev_heap(struct drm_device *dev,
shmem->map_wc = false;
abo = to_xdna_obj(&shmem->base);
-
abo->type = AMDXDNA_BO_DEV_HEAP;
abo->client = client;
abo->mem.dev_addr = client->xdna->dev_info->dev_mem_base;
drm_mm_init(&abo->mm, abo->mem.dev_addr, abo->mem.size);
+ ret = drm_gem_vmap(to_gobj(abo), &map);
+ if (ret) {
+ XDNA_ERR(xdna, "Vmap heap bo failed, ret %d", ret);
+ goto release_obj;
+ }
+ abo->mem.kva = map.vaddr;
+
client->dev_heap = abo;
drm_gem_object_get(to_gobj(abo));
mutex_unlock(&client->mm_lock);
return abo;
+release_obj:
+ drm_gem_object_put(to_gobj(abo));
mm_unlock:
mutex_unlock(&client->mm_lock);
return ERR_PTR(ret);
@@ -573,58 +622,32 @@ mm_unlock:
struct amdxdna_gem_obj *
amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
- struct drm_file *filp, bool use_vmap)
+ struct drm_file *filp)
{
struct amdxdna_client *client = filp->driver_priv;
struct amdxdna_dev *xdna = to_xdna_dev(dev);
size_t aligned_sz = PAGE_ALIGN(args->size);
- struct amdxdna_gem_obj *abo, *heap;
+ struct amdxdna_gem_obj *abo;
int ret;
- mutex_lock(&client->mm_lock);
- heap = client->dev_heap;
- if (!heap) {
- ret = -EINVAL;
- goto mm_unlock;
- }
-
- if (heap->mem.userptr == AMDXDNA_INVALID_ADDR) {
- XDNA_ERR(xdna, "Invalid dev heap userptr");
- ret = -EINVAL;
- goto mm_unlock;
- }
-
- if (args->size > heap->mem.size) {
- XDNA_ERR(xdna, "Invalid dev bo size 0x%llx, limit 0x%lx",
- args->size, heap->mem.size);
- ret = -EINVAL;
- goto mm_unlock;
- }
-
abo = amdxdna_gem_create_obj(&xdna->ddev, aligned_sz);
- if (IS_ERR(abo)) {
- ret = PTR_ERR(abo);
- goto mm_unlock;
- }
+ if (IS_ERR(abo))
+ return abo;
+
to_gobj(abo)->funcs = &amdxdna_gem_dev_obj_funcs;
abo->type = AMDXDNA_BO_DEV;
abo->client = client;
- abo->dev_heap = heap;
- ret = amdxdna_gem_insert_node_locked(abo, use_vmap);
+
+ ret = amdxdna_gem_heap_alloc(abo);
if (ret) {
XDNA_ERR(xdna, "Failed to alloc dev bo memory, ret %d", ret);
- goto mm_unlock;
+ amdxdna_gem_destroy_obj(abo);
+ return ERR_PTR(ret);
}
- drm_gem_object_get(to_gobj(heap));
drm_gem_private_object_init(&xdna->ddev, to_gobj(abo), aligned_sz);
- mutex_unlock(&client->mm_lock);
return abo;
-
-mm_unlock:
- mutex_unlock(&client->mm_lock);
- return ERR_PTR(ret);
}
static struct amdxdna_gem_obj *
@@ -632,10 +655,10 @@ amdxdna_drm_create_cmd_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
struct drm_file *filp)
{
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(NULL);
struct amdxdna_dev *xdna = to_xdna_dev(dev);
struct drm_gem_shmem_object *shmem;
struct amdxdna_gem_obj *abo;
- struct iosys_map map;
int ret;
if (args->size > XDNA_MAX_CMD_BO_SIZE) {
@@ -692,7 +715,7 @@ int amdxdna_drm_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_f
abo = amdxdna_drm_create_dev_heap(dev, args, filp);
break;
case AMDXDNA_BO_DEV:
- abo = amdxdna_drm_alloc_dev_bo(dev, args, filp, false);
+ abo = amdxdna_drm_alloc_dev_bo(dev, args, filp);
break;
case AMDXDNA_BO_CMD:
abo = amdxdna_drm_create_cmd_bo(dev, args, filp);
@@ -724,20 +747,13 @@ int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo)
struct amdxdna_dev *xdna = to_xdna_dev(to_gobj(abo)->dev);
int ret;
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->client->dev_heap;
+
if (is_import_bo(abo))
return 0;
- switch (abo->type) {
- case AMDXDNA_BO_SHMEM:
- case AMDXDNA_BO_DEV_HEAP:
- ret = drm_gem_shmem_pin(&abo->base);
- break;
- case AMDXDNA_BO_DEV:
- ret = drm_gem_shmem_pin(&abo->dev_heap->base);
- break;
- default:
- ret = -EOPNOTSUPP;
- }
+ ret = drm_gem_shmem_pin(&abo->base);
XDNA_DBG(xdna, "BO type %d ret %d", abo->type, ret);
return ret;
@@ -747,9 +763,6 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
{
int ret;
- if (abo->type == AMDXDNA_BO_DEV)
- abo = abo->dev_heap;
-
mutex_lock(&abo->lock);
ret = amdxdna_gem_pin_nolock(abo);
mutex_unlock(&abo->lock);
@@ -759,12 +772,12 @@ int amdxdna_gem_pin(struct amdxdna_gem_obj *abo)
void amdxdna_gem_unpin(struct amdxdna_gem_obj *abo)
{
+ if (abo->type == AMDXDNA_BO_DEV)
+ abo = abo->client->dev_heap;
+
if (is_import_bo(abo))
return;
- if (abo->type == AMDXDNA_BO_DEV)
- abo = abo->dev_heap;
-
mutex_lock(&abo->lock);
drm_gem_shmem_unpin(&abo->base);
mutex_unlock(&abo->lock);
@@ -855,10 +868,12 @@ int amdxdna_drm_sync_bo_ioctl(struct drm_device *dev,
if (is_import_bo(abo))
drm_clflush_sg(abo->base.sgt);
- else if (abo->type == AMDXDNA_BO_DEV)
- drm_clflush_pages(abo->mem.pages, abo->mem.nr_pages);
- else
+ else if (abo->mem.kva)
+ drm_clflush_virt_range(abo->mem.kva + args->offset, args->size);
+ else if (abo->base.pages)
drm_clflush_pages(abo->base.pages, gobj->size >> PAGE_SHIFT);
+ else
+ drm_WARN(&xdna->ddev, 1, "Can not get flush memory");
amdxdna_gem_unpin(abo);
diff --git a/drivers/accel/amdxdna/amdxdna_gem.h b/drivers/accel/amdxdna/amdxdna_gem.h
index aee97e971d6d..ae29db94a9d3 100644
--- a/drivers/accel/amdxdna/amdxdna_gem.h
+++ b/drivers/accel/amdxdna/amdxdna_gem.h
@@ -41,7 +41,6 @@ struct amdxdna_gem_obj {
/* Below members is uninitialized when needed */
struct drm_mm mm; /* For AMDXDNA_BO_DEV_HEAP */
- struct amdxdna_gem_obj *dev_heap; /* For AMDXDNA_BO_DEV */
struct drm_mm_node mm_node; /* For AMDXDNA_BO_DEV */
u32 assigned_hwctx;
struct dma_buf *dma_buf;
@@ -72,7 +71,7 @@ amdxdna_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf);
struct amdxdna_gem_obj *
amdxdna_drm_alloc_dev_bo(struct drm_device *dev,
struct amdxdna_drm_create_bo *args,
- struct drm_file *filp, bool use_vmap);
+ struct drm_file *filp);
int amdxdna_gem_pin_nolock(struct amdxdna_gem_obj *abo);
int amdxdna_gem_pin(struct amdxdna_gem_obj *abo);
diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
index 68eebed3b050..80fa08bf57bd 100644
--- a/drivers/accel/habanalabs/common/device.c
+++ b/drivers/accel/habanalabs/common/device.c
@@ -1066,28 +1066,11 @@ static bool is_pci_link_healthy(struct hl_device *hdev)
return (device_id == hdev->pdev->device);
}
-static void stringify_time_of_last_heartbeat(struct hl_device *hdev, char *time_str, size_t size,
- bool is_pq_hb)
-{
- time64_t seconds = is_pq_hb ? hdev->heartbeat_debug_info.last_pq_heartbeat_ts
- : hdev->heartbeat_debug_info.last_eq_heartbeat_ts;
- struct tm tm;
-
- if (!seconds)
- return;
-
- time64_to_tm(seconds, 0, &tm);
-
- snprintf(time_str, size, "%ld-%02d-%02d %02d:%02d:%02d (UTC)",
- tm.tm_year + 1900, tm.tm_mon, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec);
-}
-
static bool hl_device_eq_heartbeat_received(struct hl_device *hdev)
{
struct eq_heartbeat_debug_info *heartbeat_debug_info = &hdev->heartbeat_debug_info;
u32 cpu_q_id = heartbeat_debug_info->cpu_queue_id, pq_pi_mask = (HL_QUEUE_LENGTH << 1) - 1;
struct asic_fixed_properties *prop = &hdev->asic_prop;
- char pq_time_str[64] = "N/A", eq_time_str[64] = "N/A";
if (!prop->cpucp_info.eq_health_check_supported)
return true;
@@ -1095,17 +1078,15 @@ static bool hl_device_eq_heartbeat_received(struct hl_device *hdev)
if (!hdev->eq_heartbeat_received) {
dev_err(hdev->dev, "EQ heartbeat event was not received!\n");
- stringify_time_of_last_heartbeat(hdev, pq_time_str, sizeof(pq_time_str), true);
- stringify_time_of_last_heartbeat(hdev, eq_time_str, sizeof(eq_time_str), false);
dev_err(hdev->dev,
- "EQ: {CI %u, HB counter %u, last HB time: %s}, PQ: {PI: %u, CI: %u (%u), last HB time: %s}\n",
+ "EQ: {CI %u, HB counter %u, last HB time: %ptTs}, PQ: {PI: %u, CI: %u (%u), last HB time: %ptTs}\n",
hdev->event_queue.ci,
heartbeat_debug_info->heartbeat_event_counter,
- eq_time_str,
+ &hdev->heartbeat_debug_info.last_eq_heartbeat_ts,
hdev->kernel_queues[cpu_q_id].pi,
atomic_read(&hdev->kernel_queues[cpu_q_id].ci),
atomic_read(&hdev->kernel_queues[cpu_q_id].ci) & pq_pi_mask,
- pq_time_str);
+ &hdev->heartbeat_debug_info.last_pq_heartbeat_ts);
hl_eq_dump(hdev, &hdev->event_queue);
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 0e7748c5e117..3d6d52492536 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -704,6 +704,7 @@ static struct pci_device_id ivpu_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) },
{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);
diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h
index 5497e7030e91..62ab1c654e63 100644
--- a/drivers/accel/ivpu/ivpu_drv.h
+++ b/drivers/accel/ivpu/ivpu_drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#ifndef __IVPU_DRV_H__
@@ -26,6 +26,7 @@
#define PCI_DEVICE_ID_ARL 0xad1d
#define PCI_DEVICE_ID_LNL 0x643e
#define PCI_DEVICE_ID_PTL_P 0xb03e
+#define PCI_DEVICE_ID_WCL 0xfd3e
#define IVPU_HW_IP_37XX 37
#define IVPU_HW_IP_40XX 40
@@ -165,6 +166,7 @@ struct ivpu_device {
int boot;
int jsm;
int tdr;
+ int inference;
int autosuspend;
int d0i3_entry_msg;
int state_dump_msg;
@@ -207,10 +209,11 @@ extern bool ivpu_force_snoop;
#define IVPU_TEST_MODE_D0I3_MSG_ENABLE BIT(5)
#define IVPU_TEST_MODE_MIP_DISABLE BIT(6)
#define IVPU_TEST_MODE_DISABLE_TIMEOUTS BIT(8)
-#define IVPU_TEST_MODE_TURBO BIT(9)
-#define IVPU_TEST_MODE_CLK_RELINQ_DISABLE BIT(10)
-#define IVPU_TEST_MODE_CLK_RELINQ_ENABLE BIT(11)
-#define IVPU_TEST_MODE_D0I2_DISABLE BIT(12)
+#define IVPU_TEST_MODE_TURBO_ENABLE BIT(9)
+#define IVPU_TEST_MODE_TURBO_DISABLE BIT(10)
+#define IVPU_TEST_MODE_CLK_RELINQ_DISABLE BIT(11)
+#define IVPU_TEST_MODE_CLK_RELINQ_ENABLE BIT(12)
+#define IVPU_TEST_MODE_D0I2_DISABLE BIT(13)
extern int ivpu_test_mode;
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
@@ -240,6 +243,7 @@ static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev)
case PCI_DEVICE_ID_LNL:
return IVPU_HW_IP_40XX;
case PCI_DEVICE_ID_PTL_P:
+ case PCI_DEVICE_ID_WCL:
return IVPU_HW_IP_50XX;
default:
dump_stack();
@@ -256,6 +260,7 @@ static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev)
return IVPU_HW_BTRS_MTL;
case PCI_DEVICE_ID_LNL:
case PCI_DEVICE_ID_PTL_P:
+ case PCI_DEVICE_ID_WCL:
return IVPU_HW_BTRS_LNL;
default:
dump_stack();
diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c
index 633160470c93..08dcc31b56f4 100644
--- a/drivers/accel/ivpu/ivpu_hw.c
+++ b/drivers/accel/ivpu/ivpu_hw.c
@@ -94,12 +94,14 @@ static void timeouts_init(struct ivpu_device *vdev)
vdev->timeout.boot = -1;
vdev->timeout.jsm = -1;
vdev->timeout.tdr = -1;
+ vdev->timeout.inference = -1;
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = -1;
} else if (ivpu_is_fpga(vdev)) {
vdev->timeout.boot = 50;
vdev->timeout.jsm = 15000;
vdev->timeout.tdr = 30000;
+ vdev->timeout.inference = 900000;
vdev->timeout.autosuspend = -1;
vdev->timeout.d0i3_entry_msg = 500;
vdev->timeout.state_dump_msg = 10000;
@@ -107,6 +109,7 @@ static void timeouts_init(struct ivpu_device *vdev)
vdev->timeout.boot = 50;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 10000;
+ vdev->timeout.inference = 300000;
vdev->timeout.autosuspend = 100;
vdev->timeout.d0i3_entry_msg = 100;
vdev->timeout.state_dump_msg = 10;
@@ -114,6 +117,7 @@ static void timeouts_init(struct ivpu_device *vdev)
vdev->timeout.boot = 1000;
vdev->timeout.jsm = 500;
vdev->timeout.tdr = 2000;
+ vdev->timeout.inference = 60000;
if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
vdev->timeout.autosuspend = 10;
else
diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c
index 823f6a57dc54..2bf9882ab52e 100644
--- a/drivers/accel/ivpu/ivpu_hw_ip.c
+++ b/drivers/accel/ivpu/ivpu_hw_ip.c
@@ -683,6 +683,7 @@ static void pwr_island_delay_set(struct ivpu_device *vdev)
return;
switch (ivpu_device_id(vdev)) {
+ case PCI_DEVICE_ID_WCL:
case PCI_DEVICE_ID_PTL_P:
post = high ? 18 : 0;
post1 = 0;
diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c
index fae8351aa330..060f1fc031d3 100644
--- a/drivers/accel/ivpu/ivpu_job.c
+++ b/drivers/accel/ivpu/ivpu_job.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2024 Intel Corporation
+ * Copyright (C) 2020-2025 Intel Corporation
*/
#include <drm/drm_file.h>
@@ -100,6 +100,43 @@ err_free_cmdq:
return NULL;
}
+/**
+ * ivpu_cmdq_get_entry_count - Calculate the number of entries in the command queue.
+ * @cmdq: Pointer to the command queue structure.
+ *
+ * Returns the number of entries that can fit in the command queue memory.
+ */
+static inline u32 ivpu_cmdq_get_entry_count(struct ivpu_cmdq *cmdq)
+{
+ size_t size = ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header);
+
+ return size / sizeof(struct vpu_job_queue_entry);
+}
+
+/**
+ * ivpu_cmdq_get_flags - Get command queue flags based on input flags and test mode.
+ * @vdev: Pointer to the ivpu device structure.
+ * @flags: Input flags to determine the command queue flags.
+ *
+ * Returns the calculated command queue flags, considering both the input flags
+ * and the current test mode settings.
+ */
+static u32 ivpu_cmdq_get_flags(struct ivpu_device *vdev, u32 flags)
+{
+ u32 cmdq_flags = 0;
+
+ if ((flags & DRM_IVPU_CMDQ_FLAG_TURBO) && (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX))
+ cmdq_flags |= VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+
+ /* Test mode can override the TURBO flag coming from the application */
+ if (ivpu_test_mode & IVPU_TEST_MODE_TURBO_ENABLE)
+ cmdq_flags |= VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+ if (ivpu_test_mode & IVPU_TEST_MODE_TURBO_DISABLE)
+ cmdq_flags &= ~VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
+
+ return cmdq_flags;
+}
+
static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
ivpu_preemption_buffers_free(file_priv->vdev, file_priv, cmdq);
@@ -107,8 +144,7 @@ static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *c
kfree(cmdq);
}
-static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 priority,
- bool is_legacy)
+static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 priority, u32 flags)
{
struct ivpu_device *vdev = file_priv->vdev;
struct ivpu_cmdq *cmdq = NULL;
@@ -121,10 +157,6 @@ static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 p
ivpu_err(vdev, "Failed to allocate command queue\n");
return NULL;
}
-
- cmdq->priority = priority;
- cmdq->is_legacy = is_legacy;
-
ret = xa_alloc_cyclic(&file_priv->cmdq_xa, &cmdq->id, cmdq, file_priv->cmdq_limit,
&file_priv->cmdq_id_next, GFP_KERNEL);
if (ret < 0) {
@@ -132,7 +164,15 @@ static struct ivpu_cmdq *ivpu_cmdq_create(struct ivpu_file_priv *file_priv, u8 p
goto err_free_cmdq;
}
- ivpu_dbg(vdev, JOB, "Command queue %d created, ctx %d\n", cmdq->id, file_priv->ctx.id);
+ cmdq->entry_count = ivpu_cmdq_get_entry_count(cmdq);
+ cmdq->priority = priority;
+
+ cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
+ cmdq->jobq->header.engine_idx = VPU_ENGINE_COMPUTE;
+ cmdq->jobq->header.flags = ivpu_cmdq_get_flags(vdev, flags);
+
+ ivpu_dbg(vdev, JOB, "Command queue %d created, ctx %d, flags 0x%08x\n",
+ cmdq->id, file_priv->ctx.id, cmdq->jobq->header.flags);
return cmdq;
err_free_cmdq:
@@ -188,27 +228,14 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *
return ret;
}
-static void ivpu_cmdq_jobq_init(struct ivpu_device *vdev, struct vpu_job_queue *jobq)
+static void ivpu_cmdq_jobq_reset(struct ivpu_device *vdev, struct vpu_job_queue *jobq)
{
- jobq->header.engine_idx = VPU_ENGINE_COMPUTE;
jobq->header.head = 0;
jobq->header.tail = 0;
- if (ivpu_test_mode & IVPU_TEST_MODE_TURBO) {
- ivpu_dbg(vdev, JOB, "Turbo mode enabled");
- jobq->header.flags = VPU_JOB_QUEUE_FLAGS_TURBO_MODE;
- }
-
wmb(); /* Flush WC buffer for jobq->header */
}
-static inline u32 ivpu_cmdq_get_entry_count(struct ivpu_cmdq *cmdq)
-{
- size_t size = ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header);
-
- return size / sizeof(struct vpu_job_queue_entry);
-}
-
static int ivpu_cmdq_register(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
struct ivpu_device *vdev = file_priv->vdev;
@@ -219,10 +246,7 @@ static int ivpu_cmdq_register(struct ivpu_file_priv *file_priv, struct ivpu_cmdq
if (cmdq->db_id)
return 0;
- cmdq->entry_count = ivpu_cmdq_get_entry_count(cmdq);
- cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
-
- ivpu_cmdq_jobq_init(vdev, cmdq->jobq);
+ ivpu_cmdq_jobq_reset(vdev, cmdq->jobq);
if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
ret = ivpu_hws_cmdq_init(file_priv, cmdq, VPU_ENGINE_COMPUTE, cmdq->priority);
@@ -291,9 +315,10 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire_legacy(struct ivpu_file_priv *file_pr
break;
if (!cmdq) {
- cmdq = ivpu_cmdq_create(file_priv, priority, true);
+ cmdq = ivpu_cmdq_create(file_priv, priority, 0);
if (!cmdq)
return NULL;
+ cmdq->is_legacy = true;
}
return cmdq;
@@ -891,7 +916,7 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
mutex_lock(&file_priv->lock);
- cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), false);
+ cmdq = ivpu_cmdq_create(file_priv, ivpu_job_to_jsm_priority(args->priority), args->flags);
if (cmdq)
args->cmdq_id = cmdq->id;
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index ea30db181cd7..eacda1dbe840 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -33,8 +33,11 @@ static unsigned long ivpu_tdr_timeout_ms;
module_param_named(tdr_timeout_ms, ivpu_tdr_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(tdr_timeout_ms, "Timeout for device hang detection, in milliseconds, 0 - default");
+static unsigned long ivpu_inference_timeout_ms;
+module_param_named(inference_timeout_ms, ivpu_inference_timeout_ms, ulong, 0644);
+MODULE_PARM_DESC(inference_timeout_ms, "Inference maximum duration, in milliseconds, 0 - default");
+
#define PM_RESCHEDULE_LIMIT 5
-#define PM_TDR_HEARTBEAT_LIMIT 30
static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev)
{
@@ -191,6 +194,10 @@ static void ivpu_job_timeout_work(struct work_struct *work)
{
struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
struct ivpu_device *vdev = pm->vdev;
+ unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
+ unsigned long inference_timeout_ms = ivpu_inference_timeout_ms ? ivpu_inference_timeout_ms :
+ vdev->timeout.inference;
+ u64 inference_max_retries;
u64 heartbeat;
if (ivpu_jsm_get_heartbeat(vdev, 0, &heartbeat) || heartbeat <= vdev->fw->last_heartbeat) {
@@ -198,8 +205,10 @@ static void ivpu_job_timeout_work(struct work_struct *work)
goto recovery;
}
- if (atomic_fetch_inc(&vdev->job_timeout_counter) > PM_TDR_HEARTBEAT_LIMIT) {
- ivpu_err(vdev, "Job timeout detected, heartbeat limit exceeded\n");
+ inference_max_retries = DIV_ROUND_UP(inference_timeout_ms, timeout_ms);
+ if (atomic_fetch_inc(&vdev->job_timeout_counter) >= inference_max_retries) {
+ ivpu_err(vdev, "Job timeout detected, heartbeat limit (%lld) exceeded\n",
+ inference_max_retries);
goto recovery;
}
diff --git a/drivers/accel/qaic/Makefile b/drivers/accel/qaic/Makefile
index 35e883515629..1106b876f737 100644
--- a/drivers/accel/qaic/Makefile
+++ b/drivers/accel/qaic/Makefile
@@ -10,6 +10,7 @@ qaic-y := \
qaic_control.o \
qaic_data.o \
qaic_drv.o \
+ qaic_ras.o \
qaic_timesync.o \
sahara.o
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index 0dbb8e32e4b9..c31081e42cee 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -167,6 +167,14 @@ struct qaic_device {
struct workqueue_struct *bootlog_wq;
/* Synchronizes access of pages in MHI bootlog device */
struct mutex bootlog_mutex;
+ /* MHI RAS channel device */
+ struct mhi_device *ras_ch;
+ /* Correctable error count */
+ unsigned int ce_count;
+ /* Uncorrectable error count */
+ unsigned int ue_count;
+ /* Uncorrectable non-fatal error count */
+ unsigned int ue_nf_count;
};
struct qaic_drm_device {
@@ -213,8 +221,6 @@ struct qaic_bo {
bool sliced;
/* Request ID of this BO if it is queued for execution */
u16 req_id;
- /* Handle assigned to this BO */
- u32 handle;
/* Wait on this for completion of DMA transfer of this BO */
struct completion xfer_done;
/*
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index 1bce1af7c72c..797289e9d780 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -731,7 +731,6 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
if (ret)
goto free_bo;
- bo->handle = args->handle;
drm_gem_object_put(obj);
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index 3b415e2c9431..e31bcb0ecfc9 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -29,6 +29,7 @@
#include "mhi_controller.h"
#include "qaic.h"
#include "qaic_debugfs.h"
+#include "qaic_ras.h"
#include "qaic_timesync.h"
#include "sahara.h"
@@ -695,6 +696,10 @@ static int __init qaic_init(void)
if (ret)
pr_debug("qaic: qaic_bootlog_register failed %d\n", ret);
+ ret = qaic_ras_register();
+ if (ret)
+ pr_debug("qaic: qaic_ras_register failed %d\n", ret);
+
return 0;
free_mhi:
@@ -722,6 +727,7 @@ static void __exit qaic_exit(void)
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
+ qaic_ras_unregister();
qaic_bootlog_unregister();
qaic_timesync_deinit();
sahara_unregister();
diff --git a/drivers/accel/qaic/qaic_ras.c b/drivers/accel/qaic/qaic_ras.c
new file mode 100644
index 000000000000..914ffc4a9970
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ras.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */
+/* Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. */
+
+#include <asm/byteorder.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mhi.h>
+
+#include "qaic.h"
+#include "qaic_ras.h"
+
+#define MAGIC 0x55AA
+#define VERSION 0x2
+#define HDR_SZ 12
+#define NUM_TEMP_LVL 3
+#define POWER_BREAK BIT(0)
+
+enum msg_type {
+ MSG_PUSH, /* async push from device */
+ MSG_REQ, /* sync request to device */
+ MSG_RESP, /* sync response from device */
+};
+
+enum err_type {
+ CE, /* correctable error */
+ UE, /* uncorrectable error */
+ UE_NF, /* uncorrectable error that is non-fatal, expect a disruption */
+ ERR_TYPE_MAX,
+};
+
+static const char * const err_type_str[] = {
+ [CE] = "Correctable",
+ [UE] = "Uncorrectable",
+ [UE_NF] = "Uncorrectable Non-Fatal",
+};
+
+static const char * const err_class_str[] = {
+ [CE] = "Warning",
+ [UE] = "Fatal",
+ [UE_NF] = "Warning",
+};
+
+enum err_source {
+ SOC_MEM,
+ PCIE,
+ DDR,
+ SYS_BUS1,
+ SYS_BUS2,
+ NSP_MEM,
+ TSENS,
+};
+
+static const char * const err_src_str[TSENS + 1] = {
+ [SOC_MEM] = "SoC Memory",
+ [PCIE] = "PCIE",
+ [DDR] = "DDR",
+ [SYS_BUS1] = "System Bus source 1",
+ [SYS_BUS2] = "System Bus source 2",
+ [NSP_MEM] = "NSP Memory",
+ [TSENS] = "Temperature Sensors",
+};
+
+struct ras_data {
+ /* header start */
+ /* Magic number to validate the message */
+ u16 magic;
+ /* RAS version number */
+ u16 ver;
+ u32 seq_num;
+ /* RAS message type */
+ u8 type;
+ u8 id;
+ /* Size of RAS message without the header, in bytes */
+ u16 len;
+ /* header end */
+ s32 result;
+ /*
+ * Error source
+ * 0 : SoC Memory
+ * 1 : PCIE
+ * 2 : DDR
+ * 3 : System Bus source 1
+ * 4 : System Bus source 2
+ * 5 : NSP Memory
+ * 6 : Temperature Sensors
+ */
+ u32 source;
+ /*
+ * Stores the error type, there are three types of error in RAS
+ * 0 : correctable error (CE)
+ * 1 : uncorrectable error (UE)
+ * 2 : uncorrectable error that is non-fatal (UE_NF)
+ */
+ u32 err_type;
+ u32 err_threshold;
+ u32 ce_count;
+ u32 ue_count;
+ u32 intr_num;
+ /* Data specific to error source */
+ u8 syndrome[64];
+} __packed;
+
+struct soc_mem_syndrome {
+ u64 error_address[8];
+} __packed;
+
+struct nsp_mem_syndrome {
+ u32 error_address[8];
+ u8 nsp_id;
+} __packed;
+
+struct ddr_syndrome {
+ u32 count;
+ u32 irq_status;
+ u32 data_31_0[2];
+ u32 data_63_32[2];
+ u32 data_95_64[2];
+ u32 data_127_96[2];
+ u32 addr_lsb;
+ u16 addr_msb;
+ u16 parity_bits;
+ u16 instance;
+ u16 err_type;
+} __packed;
+
+struct tsens_syndrome {
+ u32 threshold_type;
+ s32 temp;
+} __packed;
+
+struct sysbus1_syndrome {
+ u32 slave;
+ u32 err_type;
+ u16 addr[8];
+ u8 instance;
+} __packed;
+
+struct sysbus2_syndrome {
+ u32 lsb3;
+ u32 msb3;
+ u32 lsb2;
+ u32 msb2;
+ u32 ext_id;
+ u16 path;
+ u16 op_type;
+ u16 len;
+ u16 redirect;
+ u8 valid;
+ u8 word_error;
+ u8 non_secure;
+ u8 opc;
+ u8 error_code;
+ u8 trans_type;
+ u8 addr_space;
+ u8 instance;
+} __packed;
+
+struct pcie_syndrome {
+ /* CE info */
+ u32 bad_tlp;
+ u32 bad_dllp;
+ u32 replay_rollover;
+ u32 replay_timeout;
+ u32 rx_err;
+ u32 internal_ce_count;
+ /* UE_NF info */
+ u32 fc_timeout;
+ u32 poison_tlp;
+ u32 ecrc_err;
+ u32 unsupported_req;
+ u32 completer_abort;
+ u32 completion_timeout;
+ /* UE info */
+ u32 addr;
+ u8 index;
+ /*
+ * Flag to indicate specific event of PCIe
+ * BIT(0): Power break (low power)
+ * BIT(1) to BIT(7): Reserved
+ */
+ u8 flag;
+} __packed;
+
+static const char * const threshold_type_str[NUM_TEMP_LVL] = {
+ [0] = "lower",
+ [1] = "upper",
+ [2] = "critical",
+};
+
+static void ras_msg_to_cpu(struct ras_data *msg)
+{
+ struct sysbus1_syndrome *sysbus1_syndrome = (struct sysbus1_syndrome *)&msg->syndrome[0];
+ struct sysbus2_syndrome *sysbus2_syndrome = (struct sysbus2_syndrome *)&msg->syndrome[0];
+ struct soc_mem_syndrome *soc_syndrome = (struct soc_mem_syndrome *)&msg->syndrome[0];
+ struct nsp_mem_syndrome *nsp_syndrome = (struct nsp_mem_syndrome *)&msg->syndrome[0];
+ struct tsens_syndrome *tsens_syndrome = (struct tsens_syndrome *)&msg->syndrome[0];
+ struct pcie_syndrome *pcie_syndrome = (struct pcie_syndrome *)&msg->syndrome[0];
+ struct ddr_syndrome *ddr_syndrome = (struct ddr_syndrome *)&msg->syndrome[0];
+ int i;
+
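+ /* The header and common fields are little endian on the wire */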
+ le16_to_cpus(&msg->magic);
+ le16_to_cpus(&msg->ver);
+ le32_to_cpus(&msg->seq_num);
+ le16_to_cpus(&msg->len);
+ le32_to_cpus(&msg->result);
+ le32_to_cpus(&msg->source);
+ le32_to_cpus(&msg->err_type);
+ le32_to_cpus(&msg->err_threshold);
+ le32_to_cpus(&msg->ce_count);
+ le32_to_cpus(&msg->ue_count);
+ le32_to_cpus(&msg->intr_num);
+
+ switch (msg->source) {
+ case SOC_MEM:
+ for (i = 0; i < 8; i++)
+ le64_to_cpus(&soc_syndrome->error_address[i]);
+ break;
+ case PCIE:
+ le32_to_cpus(&pcie_syndrome->bad_tlp);
+ le32_to_cpus(&pcie_syndrome->bad_dllp);
+ le32_to_cpus(&pcie_syndrome->replay_rollover);
+ le32_to_cpus(&pcie_syndrome->replay_timeout);
+ le32_to_cpus(&pcie_syndrome->rx_err);
+ le32_to_cpus(&pcie_syndrome->internal_ce_count);
+ le32_to_cpus(&pcie_syndrome->fc_timeout);
+ le32_to_cpus(&pcie_syndrome->poison_tlp);
+ le32_to_cpus(&pcie_syndrome->ecrc_err);
+ le32_to_cpus(&pcie_syndrome->unsupported_req);
+ le32_to_cpus(&pcie_syndrome->completer_abort);
+ le32_to_cpus(&pcie_syndrome->completion_timeout);
+ le32_to_cpus(&pcie_syndrome->addr);
+ break;
+ case DDR:
+ le16_to_cpus(&ddr_syndrome->instance);
+ le16_to_cpus(&ddr_syndrome->err_type);
+ le32_to_cpus(&ddr_syndrome->count);
+ le32_to_cpus(&ddr_syndrome->irq_status);
+ le32_to_cpus(&ddr_syndrome->data_31_0[0]);
+ le32_to_cpus(&ddr_syndrome->data_31_0[1]);
+ le32_to_cpus(&ddr_syndrome->data_63_32[0]);
+ le32_to_cpus(&ddr_syndrome->data_63_32[1]);
+ le32_to_cpus(&ddr_syndrome->data_95_64[0]);
+ le32_to_cpus(&ddr_syndrome->data_95_64[1]);
+ le32_to_cpus(&ddr_syndrome->data_127_96[0]);
+ le32_to_cpus(&ddr_syndrome->data_127_96[1]);
+ le16_to_cpus(&ddr_syndrome->parity_bits);
+ le16_to_cpus(&ddr_syndrome->addr_msb);
+ le32_to_cpus(&ddr_syndrome->addr_lsb);
+ break;
+ case SYS_BUS1:
+ le32_to_cpus(&sysbus1_syndrome->slave);
+ le32_to_cpus(&sysbus1_syndrome->err_type);
+ for (i = 0; i < 8; i++)
+ le16_to_cpus(&sysbus1_syndrome->addr[i]);
+ break;
+ case SYS_BUS2:
+ le16_to_cpus(&sysbus2_syndrome->op_type);
+ le16_to_cpus(&sysbus2_syndrome->len);
+ le16_to_cpus(&sysbus2_syndrome->redirect);
+ le16_to_cpus(&sysbus2_syndrome->path);
+ le32_to_cpus(&sysbus2_syndrome->ext_id);
+ le32_to_cpus(&sysbus2_syndrome->lsb2);
+ le32_to_cpus(&sysbus2_syndrome->msb2);
+ le32_to_cpus(&sysbus2_syndrome->lsb3);
+ le32_to_cpus(&sysbus2_syndrome->msb3);
+ break;
+ case NSP_MEM:
+ for (i = 0; i < 8; i++)
+ le32_to_cpus(&nsp_syndrome->error_address[i]);
+ break;
+ case TSENS:
+ le32_to_cpus(&tsens_syndrome->threshold_type);
+ le32_to_cpus(&tsens_syndrome->temp);
+ break;
+ }
+}
+
+static void decode_ras_msg(struct qaic_device *qdev, struct ras_data *msg)
+{
+ struct sysbus1_syndrome *sysbus1_syndrome = (struct sysbus1_syndrome *)&msg->syndrome[0];
+ struct sysbus2_syndrome *sysbus2_syndrome = (struct sysbus2_syndrome *)&msg->syndrome[0];
+ struct soc_mem_syndrome *soc_syndrome = (struct soc_mem_syndrome *)&msg->syndrome[0];
+ struct nsp_mem_syndrome *nsp_syndrome = (struct nsp_mem_syndrome *)&msg->syndrome[0];
+ struct tsens_syndrome *tsens_syndrome = (struct tsens_syndrome *)&msg->syndrome[0];
+ struct pcie_syndrome *pcie_syndrome = (struct pcie_syndrome *)&msg->syndrome[0];
+ struct ddr_syndrome *ddr_syndrome = (struct ddr_syndrome *)&msg->syndrome[0];
+ char *class;
+ char *level;
+
+ if (msg->magic != MAGIC) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid magic %x\n", msg->magic);
+ return;
+ }
+
+ if (!msg->ver || msg->ver > VERSION) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid version %d\n", msg->ver);
+ return;
+ }
+
+ if (msg->type != MSG_PUSH) {
+ pci_warn(qdev->pdev, "Dropping non-PUSH RAS message\n");
+ return;
+ }
+
+ if (msg->len != sizeof(*msg) - HDR_SZ) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid len %d\n", msg->len);
+ return;
+ }
+
+ if (msg->err_type >= ERR_TYPE_MAX) {
+ pci_warn(qdev->pdev, "Dropping RAS message with err type %d\n", msg->err_type);
+ return;
+ }
+
+ if (msg->err_type == UE)
+ level = KERN_ERR;
+ else
+ level = KERN_WARNING;
+
+ switch (msg->source) {
+ case SOC_MEM:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n 0x%llx\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ soc_syndrome->error_address[0],
+ soc_syndrome->error_address[1],
+ soc_syndrome->error_address[2],
+ soc_syndrome->error_address[3],
+ soc_syndrome->error_address[4],
+ soc_syndrome->error_address[5],
+ soc_syndrome->error_address[6],
+ soc_syndrome->error_address[7]);
+ break;
+ case PCIE:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold);
+
+ switch (msg->err_type) {
+ case CE:
+ /*
+ * Modeled after AER prints. This continues the dev_printk() from a few
+ * lines up. We reduce duplication of code, but also avoid re-printing the
+ * PCI device info so that the end result looks uniform to the log user.
+ */
+ printk(KERN_WARNING pr_fmt("Syndrome:\n Bad TLP count %d\n Bad DLLP count %d\n Replay Rollover count %d\n Replay Timeout count %d\n Recv Error count %d\n Internal CE count %d\n"),
+ pcie_syndrome->bad_tlp,
+ pcie_syndrome->bad_dllp,
+ pcie_syndrome->replay_rollover,
+ pcie_syndrome->replay_timeout,
+ pcie_syndrome->rx_err,
+ pcie_syndrome->internal_ce_count);
+ if (msg->ver > 0x1)
+ pr_warn(" Power break %s\n",
+ pcie_syndrome->flag & POWER_BREAK ? "ON" : "OFF");
+ break;
+ case UE:
+ printk(KERN_ERR pr_fmt("Syndrome:\n Index %d\n Address 0x%x\n"),
+ pcie_syndrome->index, pcie_syndrome->addr);
+ break;
+ case UE_NF:
+ printk(KERN_WARNING pr_fmt("Syndrome:\n FC timeout count %d\n Poisoned TLP count %d\n ECRC error count %d\n Unsupported request count %d\n Completer abort count %d\n Completion timeout count %d\n"),
+ pcie_syndrome->fc_timeout,
+ pcie_syndrome->poison_tlp,
+ pcie_syndrome->ecrc_err,
+ pcie_syndrome->unsupported_req,
+ pcie_syndrome->completer_abort,
+ pcie_syndrome->completion_timeout);
+ break;
+ default:
+ break;
+ }
+ break;
+ case DDR:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n Instance %d\n Count %d\n Data 31_0 0x%x 0x%x\n Data 63_32 0x%x 0x%x\n Data 95_64 0x%x 0x%x\n Data 127_96 0x%x 0x%x\n Parity bits 0x%x\n Address msb 0x%x\n Address lsb 0x%x\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ ddr_syndrome->instance,
+ ddr_syndrome->count,
+ ddr_syndrome->data_31_0[1],
+ ddr_syndrome->data_31_0[0],
+ ddr_syndrome->data_63_32[1],
+ ddr_syndrome->data_63_32[0],
+ ddr_syndrome->data_95_64[1],
+ ddr_syndrome->data_95_64[0],
+ ddr_syndrome->data_127_96[1],
+ ddr_syndrome->data_127_96[0],
+ ddr_syndrome->parity_bits,
+ ddr_syndrome->addr_msb,
+ ddr_syndrome->addr_lsb);
+ break;
+ case SYS_BUS1:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n instance %d\n %s\n err_type %d\n address0 0x%x\n address1 0x%x\n address2 0x%x\n address3 0x%x\n address4 0x%x\n address5 0x%x\n address6 0x%x\n address7 0x%x\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ sysbus1_syndrome->instance,
+ sysbus1_syndrome->slave ? "Slave" : "Master",
+ sysbus1_syndrome->err_type,
+ sysbus1_syndrome->addr[0],
+ sysbus1_syndrome->addr[1],
+ sysbus1_syndrome->addr[2],
+ sysbus1_syndrome->addr[3],
+ sysbus1_syndrome->addr[4],
+ sysbus1_syndrome->addr[5],
+ sysbus1_syndrome->addr[6],
+ sysbus1_syndrome->addr[7]);
+ break;
+ case SYS_BUS2:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n instance %d\n valid %d\n word error %d\n non-secure %d\n opc %d\n error code %d\n transaction type %d\n address space %d\n operation type %d\n len %d\n redirect %d\n path %d\n ext_id %d\n lsb2 %d\n msb2 %d\n lsb3 %d\n msb3 %d\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ sysbus2_syndrome->instance,
+ sysbus2_syndrome->valid,
+ sysbus2_syndrome->word_error,
+ sysbus2_syndrome->non_secure,
+ sysbus2_syndrome->opc,
+ sysbus2_syndrome->error_code,
+ sysbus2_syndrome->trans_type,
+ sysbus2_syndrome->addr_space,
+ sysbus2_syndrome->op_type,
+ sysbus2_syndrome->len,
+ sysbus2_syndrome->redirect,
+ sysbus2_syndrome->path,
+ sysbus2_syndrome->ext_id,
+ sysbus2_syndrome->lsb2,
+ sysbus2_syndrome->msb2,
+ sysbus2_syndrome->lsb3,
+ sysbus2_syndrome->msb3);
+ break;
+ case NSP_MEM:
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n NSP ID %d\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n 0x%x\n",
+ err_class_str[msg->err_type],
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ nsp_syndrome->nsp_id,
+ nsp_syndrome->error_address[0],
+ nsp_syndrome->error_address[1],
+ nsp_syndrome->error_address[2],
+ nsp_syndrome->error_address[3],
+ nsp_syndrome->error_address[4],
+ nsp_syndrome->error_address[5],
+ nsp_syndrome->error_address[6],
+ nsp_syndrome->error_address[7]);
+ break;
+ case TSENS:
+ if (tsens_syndrome->threshold_type >= NUM_TEMP_LVL) {
+ pci_warn(qdev->pdev, "Dropping RAS message with invalid temp threshold %d\n",
+ tsens_syndrome->threshold_type);
+ break;
+ }
+
+ if (msg->err_type)
+ class = "Fatal";
+ else if (tsens_syndrome->threshold_type)
+ class = "Critical";
+ else
+ class = "Warning";
+
+ dev_printk(level, &qdev->pdev->dev, "RAS event.\nClass:%s\nDescription:%s %s %s\nError Threshold for this report %d\nSyndrome:\n %s threshold\n %d deg C\n",
+ class,
+ err_type_str[msg->err_type],
+ "error from",
+ err_src_str[msg->source],
+ msg->err_threshold,
+ threshold_type_str[tsens_syndrome->threshold_type],
+ tsens_syndrome->temp);
+ break;
+ }
+
+ /* Uncorrectable errors are fatal */
+ if (msg->err_type == UE)
+ mhi_soc_reset(qdev->mhi_cntrl);
+
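+ /* Saturate the per-type error counters at UINT_MAX instead of letting them wrap */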
+ switch (msg->err_type) {
+ case CE:
+ if (qdev->ce_count != UINT_MAX)
+ qdev->ce_count++;
+ break;
+ case UE:
+ if (qdev->ue_count != UINT_MAX)
+ qdev->ue_count++;
+ break;
+ case UE_NF:
+ if (qdev->ue_nf_count != UINT_MAX)
+ qdev->ue_nf_count++;
+ break;
+ default:
+ /* not possible */
+ break;
+ }
+}
+
+static ssize_t ce_count_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ce_count);
+}
+
+static ssize_t ue_count_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ue_count);
+}
+
+static ssize_t ue_nonfatal_count_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", qdev->ue_nf_count);
+}
+
+static DEVICE_ATTR_RO(ce_count);
+static DEVICE_ATTR_RO(ue_count);
+static DEVICE_ATTR_RO(ue_nonfatal_count);
+
+static struct attribute *ras_attrs[] = {
+ &dev_attr_ce_count.attr,
+ &dev_attr_ue_count.attr,
+ &dev_attr_ue_nonfatal_count.attr,
+ NULL,
+};
+
+static struct attribute_group ras_group = {
+ .attrs = ras_attrs,
+};
+
+static int qaic_ras_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
+{
+ struct qaic_device *qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));
+ struct ras_data *resp;
+ int ret;
+
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret)
+ return ret;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ return -ENOMEM;
+ }
+
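+ /* Pre-queue a receive buffer so the device can push the first RAS message */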
+ ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, resp, sizeof(*resp), MHI_EOT);
+ if (ret) {
+ kfree(resp);
+ mhi_unprepare_from_transfer(mhi_dev);
+ return ret;
+ }
+
+ ret = device_add_group(&qdev->pdev->dev, &ras_group);
+ if (ret) {
+ mhi_unprepare_from_transfer(mhi_dev);
+ pci_dbg(qdev->pdev, "ras add sysfs failed %d\n", ret);
+ return ret;
+ }
+
+ dev_set_drvdata(&mhi_dev->dev, qdev);
+ qdev->ras_ch = mhi_dev;
+
+ return ret;
+}
+
+static void qaic_ras_mhi_remove(struct mhi_device *mhi_dev)
+{
+ struct qaic_device *qdev;
+
+ qdev = dev_get_drvdata(&mhi_dev->dev);
+ qdev->ras_ch = NULL;
+ device_remove_group(&qdev->pdev->dev, &ras_group);
+ mhi_unprepare_from_transfer(mhi_dev);
+}
+
+static void qaic_ras_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) {}
+
+static void qaic_ras_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
+{
+ struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
+ struct ras_data *msg = mhi_result->buf_addr;
+ int ret;
+
+ if (mhi_result->transaction_status) {
+ kfree(msg);
+ return;
+ }
+
+ ras_msg_to_cpu(msg);
+ decode_ras_msg(qdev, msg);
+
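+ /* Requeue the same buffer to receive the next RAS message */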
+ ret = mhi_queue_buf(qdev->ras_ch, DMA_FROM_DEVICE, msg, sizeof(*msg), MHI_EOT);
+ if (ret) {
+ dev_err(&mhi_dev->dev, "Cannot requeue RAS recv buf %d\n", ret);
+ kfree(msg);
+ }
+}
+
+static const struct mhi_device_id qaic_ras_mhi_match_table[] = {
+ { .chan = "QAIC_STATUS", },
+ {},
+};
+
+static struct mhi_driver qaic_ras_mhi_driver = {
+ .id_table = qaic_ras_mhi_match_table,
+ .remove = qaic_ras_mhi_remove,
+ .probe = qaic_ras_mhi_probe,
+ .ul_xfer_cb = qaic_ras_mhi_ul_xfer_cb,
+ .dl_xfer_cb = qaic_ras_mhi_dl_xfer_cb,
+ .driver = {
+ .name = "qaic_ras",
+ },
+};
+
+int qaic_ras_register(void)
+{
+ return mhi_driver_register(&qaic_ras_mhi_driver);
+}
+
+void qaic_ras_unregister(void)
+{
+ mhi_driver_unregister(&qaic_ras_mhi_driver);
+}
diff --git a/drivers/accel/qaic/qaic_ras.h b/drivers/accel/qaic/qaic_ras.h
new file mode 100644
index 000000000000..d44a4eeeb060
--- /dev/null
+++ b/drivers/accel/qaic/qaic_ras.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2020, The Linux Foundation. All rights reserved. */
+
+#ifndef __QAIC_RAS_H__
+#define __QAIC_RAS_H__
+
+int qaic_ras_register(void);
+void qaic_ras_unregister(void);
+
+#endif /* __QAIC_RAS_H__ */
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 6f8bbe1247a5..c9a0bcaba2e4 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -33,7 +33,7 @@
static DEFINE_MUTEX(isolated_cpus_lock);
static DEFINE_MUTEX(round_robin_lock);
-static unsigned long power_saving_mwait_eax;
+static unsigned int power_saving_mwait_eax;
static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;
diff --git a/drivers/acpi/apei/einj-core.c b/drivers/acpi/apei/einj-core.c
index fea11a35eea3..9b041415a9d0 100644
--- a/drivers/acpi/apei/einj-core.c
+++ b/drivers/acpi/apei/einj-core.c
@@ -883,19 +883,16 @@ static int __init einj_init(void)
}
einj_dev = faux_device_create("acpi-einj", NULL, &einj_device_ops);
- if (!einj_dev)
- return -ENODEV;
- einj_initialized = true;
+ if (einj_dev)
+ einj_initialized = true;
return 0;
}
static void __exit einj_exit(void)
{
- if (einj_initialized)
- faux_device_destroy(einj_dev);
-
+ faux_device_destroy(einj_dev);
}
module_init(einj_init);
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index a9ae2fd62863..6b649031808f 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -476,7 +476,7 @@ bool cppc_allow_fast_switch(void)
struct cpc_desc *cpc_ptr;
int cpu;
- for_each_possible_cpu(cpu) {
+ for_each_present_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 6f4203716b53..75c7db8b156a 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -23,8 +23,10 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/printk.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/suspend.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
@@ -2031,6 +2033,21 @@ void __init acpi_ec_ecdt_probe(void)
goto out;
}
+ if (!strstarts(ecdt_ptr->id, "\\")) {
+ /*
+ * The ECDT table on some MSI notebooks contains invalid data, together
+ * with an empty ID string ("").
+ *
+ * Section 5.2.15 of the ACPI specification requires the ID string to be
+ * a "fully qualified reference to the (...) embedded controller device",
+ * so this string always has to start with a backslash.
+ *
+ * By verifying this we can avoid such faulty ECDT tables in a safe way.
+ */
+ pr_err(FW_BUG "Ignoring ECDT due to invalid ID string \"%s\"\n", ecdt_ptr->id);
+ goto out;
+ }
+
ec = acpi_ec_alloc();
if (!ec)
goto out;
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 00910ccd7eda..e2781864fdce 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -175,6 +175,12 @@ bool processor_physically_present(acpi_handle handle);
static inline void acpi_early_processor_control_setup(void) {}
#endif
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void);
+#else
+static inline void acpi_idle_rescan_dead_smt_siblings(void) {}
+#endif
+
/* --------------------------------------------------------------------------
Embedded Controller
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 3b281bc1e73c..65e779be64ff 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -279,6 +279,9 @@ static int __init acpi_processor_driver_init(void)
* after acpi_cppc_processor_probe() has been called for all online CPUs
*/
acpi_processor_init_invariance_cppc();
+
+ acpi_idle_rescan_dead_smt_siblings();
+
return 0;
err:
driver_unregister(&acpi_processor_driver);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index e2febca2ec13..2c2dc559e0f8 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -24,6 +24,8 @@
#include <acpi/processor.h>
#include <linux/context_tracking.h>
+#include "internal.h"
+
/*
* Include the apic definitions for x86 to have the APIC timer related defines
* available also for UP (on SMP it gets magically included via linux/smp.h).
@@ -55,6 +57,12 @@ struct cpuidle_driver acpi_idle_driver = {
};
#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+void acpi_idle_rescan_dead_smt_siblings(void)
+{
+ if (cpuidle_get_driver() == &acpi_idle_driver)
+ arch_cpu_rescan_dead_smt_siblings();
+}
+
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 7d59c6c9185f..b1ab192d7a08 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -667,6 +667,13 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
},
},
{
+ /* MACHENIKE L16P/L16P */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MACHENIKE"),
+ DMI_MATCH(DMI_BOARD_NAME, "L16P"),
+ },
+ },
+ {
/*
* TongFang GM5HG0A in case of the SKIKK Vanaheim relabel the
* board-name is changed, so check OEM strings instead. Note
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
index 9054d346bd7f..f5fbda0a9a44 100644
--- a/drivers/base/faux.c
+++ b/drivers/base/faux.c
@@ -86,6 +86,7 @@ static struct device_driver faux_driver = {
.name = "faux_driver",
.bus = &faux_bus_type,
.probe_type = PROBE_FORCE_SYNCHRONOUS,
+ .suppress_bind_attrs = true,
};
static void faux_device_release(struct device *dev)
@@ -169,7 +170,7 @@ struct faux_device *faux_device_create_with_groups(const char *name,
* successful is almost impossible to determine by the caller.
*/
if (!dev->driver) {
- dev_err(dev, "probe did not succeed, tearing down the device\n");
+ dev_dbg(dev, "probe did not succeed, tearing down the device\n");
faux_device_destroy(faux_dev);
faux_dev = NULL;
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f8d136684109..500840e4a74e 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1248,12 +1248,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
lo->lo_flags &= ~LOOP_SET_STATUS_CLEARABLE_FLAGS;
lo->lo_flags |= (info->lo_flags & LOOP_SET_STATUS_SETTABLE_FLAGS);
- if (size_changed) {
- loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
- lo->lo_backing_file);
- loop_set_size(lo, new_size);
- }
-
/* update the direct I/O flag if lo_offset changed */
loop_update_dio(lo);
@@ -1261,6 +1255,11 @@ out_unfreeze:
blk_mq_unfreeze_queue(lo->lo_queue, memflags);
if (partscan)
clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state);
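+ /* Apply the new size only after the queue is unfrozen and only if no error occurred */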
+ if (!err && size_changed) {
+ loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
+ lo->lo_backing_file);
+ loop_set_size(lo, new_size);
+ }
out_unlock:
mutex_unlock(&lo->lo_mutex);
if (partscan)
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 50fe17f1e1d1..563165c5efae 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -396,8 +396,13 @@ static int btintel_pcie_submit_rx(struct btintel_pcie_data *data)
static int btintel_pcie_start_rx(struct btintel_pcie_data *data)
{
int i, ret;
+ struct rxq *rxq = &data->rxq;
+
+ /* Post (BTINTEL_PCIE_RX_DESCS_COUNT - 3) buffers to work around a
+ * hardware issue that leads to a race condition in the firmware.
+ */
- for (i = 0; i < BTINTEL_PCIE_RX_MAX_QUEUE; i++) {
+ for (i = 0; i < rxq->count - 3; i++) {
ret = btintel_pcie_submit_rx(data);
if (ret)
return ret;
@@ -1782,8 +1787,8 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
* + size of index * Number of queues(2) * type of index array(4)
* + size of context information
*/
- total = (sizeof(struct tfd) + sizeof(struct urbd0) + sizeof(struct frbd)
- + sizeof(struct urbd1)) * BTINTEL_DESCS_COUNT;
+ total = (sizeof(struct tfd) + sizeof(struct urbd0)) * BTINTEL_PCIE_TX_DESCS_COUNT;
+ total += (sizeof(struct frbd) + sizeof(struct urbd1)) * BTINTEL_PCIE_RX_DESCS_COUNT;
/* Add the sum of size of index array and size of ci struct */
total += (sizeof(u16) * BTINTEL_PCIE_NUM_QUEUES * 4) + sizeof(struct ctx_info);
@@ -1808,36 +1813,36 @@ static int btintel_pcie_alloc(struct btintel_pcie_data *data)
data->dma_v_addr = v_addr;
/* Setup descriptor count */
- data->txq.count = BTINTEL_DESCS_COUNT;
- data->rxq.count = BTINTEL_DESCS_COUNT;
+ data->txq.count = BTINTEL_PCIE_TX_DESCS_COUNT;
+ data->rxq.count = BTINTEL_PCIE_RX_DESCS_COUNT;
/* Setup tfds */
data->txq.tfds_p_addr = p_addr;
data->txq.tfds = v_addr;
- p_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct tfd) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
+ v_addr += (sizeof(struct tfd) * BTINTEL_PCIE_TX_DESCS_COUNT);
/* Setup urbd0 */
data->txq.urbd0s_p_addr = p_addr;
data->txq.urbd0s = v_addr;
- p_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct urbd0) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
+ v_addr += (sizeof(struct urbd0) * BTINTEL_PCIE_TX_DESCS_COUNT);
/* Setup FRBD*/
data->rxq.frbds_p_addr = p_addr;
data->rxq.frbds = v_addr;
- p_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct frbd) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
+ v_addr += (sizeof(struct frbd) * BTINTEL_PCIE_RX_DESCS_COUNT);
/* Setup urbd1 */
data->rxq.urbd1s_p_addr = p_addr;
data->rxq.urbd1s = v_addr;
- p_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
- v_addr += (sizeof(struct urbd1) * BTINTEL_DESCS_COUNT);
+ p_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
+ v_addr += (sizeof(struct urbd1) * BTINTEL_PCIE_RX_DESCS_COUNT);
/* Setup data buffers for txq */
err = btintel_pcie_setup_txq_bufs(data, &data->txq);
diff --git a/drivers/bluetooth/btintel_pcie.h b/drivers/bluetooth/btintel_pcie.h
index 21b964b15c1c..7dad4523236c 100644
--- a/drivers/bluetooth/btintel_pcie.h
+++ b/drivers/bluetooth/btintel_pcie.h
@@ -154,8 +154,11 @@ enum msix_mbox_int_causes {
/* Default interrupt timeout in msec */
#define BTINTEL_DEFAULT_INTR_TIMEOUT_MS 3000
-/* The number of descriptors in TX/RX queues */
-#define BTINTEL_DESCS_COUNT 16
+/* The number of descriptors in TX queues */
+#define BTINTEL_PCIE_TX_DESCS_COUNT 32
+
+/* The number of descriptors in RX queues */
+#define BTINTEL_PCIE_RX_DESCS_COUNT 64
/* Number of Queue for TX and RX
* It indicates the index of the IA(Index Array)
@@ -177,9 +180,6 @@ enum {
/* Doorbell vector for TFD */
#define BTINTEL_PCIE_TX_DB_VEC 0
-/* Number of pending RX requests for downlink */
-#define BTINTEL_PCIE_RX_MAX_QUEUE 6
-
/* Doorbell vector for FRBD */
#define BTINTEL_PCIE_RX_DB_VEC 513
diff --git a/drivers/cpufreq/rcpufreq_dt.rs b/drivers/cpufreq/rcpufreq_dt.rs
index 94ed81644fe1..43c87d0259b6 100644
--- a/drivers/cpufreq/rcpufreq_dt.rs
+++ b/drivers/cpufreq/rcpufreq_dt.rs
@@ -26,9 +26,9 @@ fn find_supply_name_exact(dev: &Device, name: &str) -> Option<CString> {
}
/// Finds supply name for the CPU from DT.
-fn find_supply_names(dev: &Device, cpu: u32) -> Option<KVec<CString>> {
+fn find_supply_names(dev: &Device, cpu: cpu::CpuId) -> Option<KVec<CString>> {
// Try "cpu0" for older DTs, fallback to "cpu".
- let name = (cpu == 0)
+ let name = (cpu.as_u32() == 0)
.then(|| find_supply_name_exact(dev, "cpu0"))
.flatten()
.or_else(|| find_supply_name_exact(dev, "cpu"))?;
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index fee04fdb0822..b46eb8a552d7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -36,7 +36,6 @@ config UDMABUF
depends on DMA_SHARED_BUFFER
depends on MEMFD_CREATE || COMPILE_TEST
depends on MMU
- select VMAP_PFN
help
A driver to let userspace turn memfd regions into dma-bufs.
Qemu can use this to create host dmabufs for guest framebuffers.
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 890ecac04dac..2bcf9ceca997 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1118,7 +1118,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
* Catch exporters making buffers inaccessible even when
* attachments preventing that exist.
*/
- WARN_ON_ONCE(ret == EBUSY);
+ WARN_ON_ONCE(ret == -EBUSY);
if (ret)
return ERR_PTR(ret);
}
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 9663ba1bb6ac..a8a90acf4f34 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -218,7 +218,6 @@ static void dma_fence_chain_set_deadline(struct dma_fence *fence,
}
const struct dma_fence_ops dma_fence_chain_ops = {
- .use_64bit_seqno = true,
.get_driver_name = dma_fence_chain_get_driver_name,
.get_timeline_name = dma_fence_chain_get_timeline_name,
.enable_signaling = dma_fence_chain_enable_signaling,
@@ -252,7 +251,7 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
chain->prev_seqno = 0;
/* Try to reuse the context of the previous chain node. */
- if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
+ if (prev_chain && __dma_fence_is_later(prev, seqno, prev->seqno)) {
context = prev->context;
chain->prev_seqno = prev->seqno;
} else {
@@ -262,8 +261,8 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
seqno = max(prev->seqno, seqno);
}
- dma_fence_init(&chain->base, &dma_fence_chain_ops,
- &chain->lock, context, seqno);
+ dma_fence_init64(&chain->base, &dma_fence_chain_ops, &chain->lock,
+ context, seqno);
/*
* Chaining dma_fence_chain container together is only allowed through
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index f0cdd3e99d36..3f78c56b58dc 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -511,12 +511,20 @@ dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
dma_fence_enable_sw_signaling(fence);
- trace_dma_fence_wait_start(fence);
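+ /* The wait trace points record the driver and timeline names, so hold RCU while they run */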
+ if (trace_dma_fence_wait_start_enabled()) {
+ rcu_read_lock();
+ trace_dma_fence_wait_start(fence);
+ rcu_read_unlock();
+ }
if (fence->ops->wait)
ret = fence->ops->wait(fence, intr, timeout);
else
ret = dma_fence_default_wait(fence, intr, timeout);
- trace_dma_fence_wait_end(fence);
+ if (trace_dma_fence_wait_end_enabled()) {
+ rcu_read_lock();
+ trace_dma_fence_wait_end(fence);
+ rcu_read_unlock();
+ }
return ret;
}
EXPORT_SYMBOL(dma_fence_wait_timeout);
@@ -533,16 +541,23 @@ void dma_fence_release(struct kref *kref)
struct dma_fence *fence =
container_of(kref, struct dma_fence, refcount);
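+ /* Driver-provided name strings may be freed once the fence is signalled; access them under RCU */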
+ rcu_read_lock();
trace_dma_fence_destroy(fence);
- if (WARN(!list_empty(&fence->cb_list) &&
- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
- "Fence %s:%s:%llx:%llx released with pending signals!\n",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
- fence->context, fence->seqno)) {
+ if (!list_empty(&fence->cb_list) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ const char __rcu *timeline;
+ const char __rcu *driver;
unsigned long flags;
+ driver = dma_fence_driver_name(fence);
+ timeline = dma_fence_timeline_name(fence);
+
+ WARN(1,
+ "Fence %s:%s:%llx:%llx released with pending signals!\n",
+ rcu_dereference(driver), rcu_dereference(timeline),
+ fence->context, fence->seqno);
+
/*
* Failed to signal before release, likely a refcounting issue.
*
@@ -556,6 +571,8 @@ void dma_fence_release(struct kref *kref)
spin_unlock_irqrestore(fence->lock, flags);
}
+ rcu_read_unlock();
+
if (fence->ops->release)
fence->ops->release(fence);
else
@@ -982,13 +999,43 @@ EXPORT_SYMBOL(dma_fence_set_deadline);
*/
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
{
+ const char __rcu *timeline;
+ const char __rcu *driver;
+
+ rcu_read_lock();
+
+ timeline = dma_fence_timeline_name(fence);
+ driver = dma_fence_driver_name(fence);
+
seq_printf(seq, "%s %s seq %llu %ssignalled\n",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence), fence->seqno,
+ rcu_dereference(driver),
+ rcu_dereference(timeline),
+ fence->seqno,
dma_fence_is_signaled(fence) ? "" : "un");
+
+ rcu_read_unlock();
}
EXPORT_SYMBOL(dma_fence_describe);
+static void
+__dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, u64 seqno, unsigned long flags)
+{
+ BUG_ON(!lock);
+ BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
+
+ kref_init(&fence->refcount);
+ fence->ops = ops;
+ INIT_LIST_HEAD(&fence->cb_list);
+ fence->lock = lock;
+ fence->context = context;
+ fence->seqno = seqno;
+ fence->flags = flags;
+ fence->error = 0;
+
+ trace_dma_fence_init(fence);
+}
+
/**
* dma_fence_init - Initialize a custom fence.
* @fence: the fence to initialize
@@ -1008,18 +1055,94 @@ void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, u64 seqno)
{
- BUG_ON(!lock);
- BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
+ __dma_fence_init(fence, ops, lock, context, seqno, 0UL);
+}
+EXPORT_SYMBOL(dma_fence_init);
- kref_init(&fence->refcount);
- fence->ops = ops;
- INIT_LIST_HEAD(&fence->cb_list);
- fence->lock = lock;
- fence->context = context;
- fence->seqno = seqno;
- fence->flags = 0UL;
- fence->error = 0;
+/**
+ * dma_fence_init64 - Initialize a custom fence with 64-bit seqno support.
+ * @fence: the fence to initialize
+ * @ops: the dma_fence_ops for operations on this fence
+ * @lock: the irqsafe spinlock to use for locking this fence
+ * @context: the execution context this fence is run on
+ * @seqno: a linear increasing sequence number for this context
+ *
+ * Initializes an allocated fence, the caller doesn't have to keep its
+ * refcount after committing with this fence, but it will need to hold a
+ * refcount again if &dma_fence_ops.enable_signaling gets called.
+ *
+ * Context and seqno are used for easy comparison between fences, allowing
+ * to check which fence is later by simply using dma_fence_later().
+ */
+void
+dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, u64 seqno)
+{
+ __dma_fence_init(fence, ops, lock, context, seqno,
+ BIT(DMA_FENCE_FLAG_SEQNO64_BIT));
+}
+EXPORT_SYMBOL(dma_fence_init64);
- trace_dma_fence_init(fence);
+/**
+ * dma_fence_driver_name - Access the driver name
+ * @fence: the fence to query
+ *
+ * Returns a driver name backing the dma-fence implementation.
+ *
+ * IMPORTANT CONSIDERATION:
+ * Dma-fence contract stipulates that access to driver provided data (data not
+ * directly embedded into the object itself), such as the &dma_fence.lock and
+ * memory potentially accessed by the &dma_fence.ops functions, is forbidden
+ * after the fence has been signalled. Drivers are allowed to free that data,
+ * and some do.
+ *
+ * To allow safe access drivers are mandated to guarantee a RCU grace period
+ * between signalling the fence and freeing said data.
+ *
+ * As such access to the driver name is only valid inside a RCU locked section.
+ * The pointer MUST be both queried and USED ONLY WITHIN a SINGLE block guarded
+ * by the &rcu_read_lock and &rcu_read_unlock pair.
+ */
+const char __rcu *dma_fence_driver_name(struct dma_fence *fence)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "RCU protection is required for safe access to returned string");
+
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return fence->ops->get_driver_name(fence);
+ else
+ return "detached-driver";
}
-EXPORT_SYMBOL(dma_fence_init);
+EXPORT_SYMBOL(dma_fence_driver_name);
+
+/**
+ * dma_fence_timeline_name - Access the timeline name
+ * @fence: the fence to query
+ *
+ * Returns a timeline name provided by the dma-fence implementation.
+ *
+ * IMPORTANT CONSIDERATION:
+ * Dma-fence contract stipulates that access to driver provided data (data not
+ * directly embedded into the object itself), such as the &dma_fence.lock and
+ * memory potentially accessed by the &dma_fence.ops functions, is forbidden
+ * after the fence has been signalled. Drivers are allowed to free that data,
+ * and some do.
+ *
+ * To allow safe access drivers are mandated to guarantee a RCU grace period
+ * between signalling the fence and freeing said data.
+ *
+ * As such access to the timeline name is only valid inside a RCU locked section.
+ * The pointer MUST be both queried and USED ONLY WITHIN a SINGLE block guarded
+ * by the &rcu_read_lock and &rcu_read_unlock pair.
+ */
+const char __rcu *dma_fence_timeline_name(struct dma_fence *fence)
+{
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "RCU protection is required for safe access to returned string");
+
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return fence->ops->get_timeline_name(fence);
+ else
+ return "signaled-timeline";
+}
+EXPORT_SYMBOL(dma_fence_timeline_name);
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 4f27ee93a00c..3c20f1d31cf5 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -170,7 +170,7 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
{
struct sync_timeline *parent = dma_fence_parent(fence);
- return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
+ return !__dma_fence_is_later(fence, fence->seqno, parent->value);
}
static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index d9b1c1b2a72b..747e377fb954 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -135,12 +135,18 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
strscpy(buf, sync_file->user_name, len);
} else {
struct dma_fence *fence = sync_file->fence;
+ const char __rcu *timeline;
+ const char __rcu *driver;
+ rcu_read_lock();
+ driver = dma_fence_driver_name(fence);
+ timeline = dma_fence_timeline_name(fence);
snprintf(buf, len, "%s-%s%llu-%lld",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence),
+ rcu_dereference(driver),
+ rcu_dereference(timeline),
fence->context,
fence->seqno);
+ rcu_read_unlock();
}
return buf;
@@ -262,9 +268,17 @@ err_put_fd:
static int sync_fill_fence_info(struct dma_fence *fence,
struct sync_fence_info *info)
{
- strscpy(info->obj_name, fence->ops->get_timeline_name(fence),
+ const char __rcu *timeline;
+ const char __rcu *driver;
+
+ rcu_read_lock();
+
+ driver = dma_fence_driver_name(fence);
+ timeline = dma_fence_timeline_name(fence);
+
+ strscpy(info->obj_name, rcu_dereference(timeline),
sizeof(info->obj_name));
- strscpy(info->driver_name, fence->ops->get_driver_name(fence),
+ strscpy(info->driver_name, rcu_dereference(driver),
sizeof(info->driver_name));
info->status = dma_fence_get_status(fence);
@@ -273,6 +287,8 @@ static int sync_fill_fence_info(struct dma_fence *fence,
ktime_to_ns(dma_fence_timestamp(fence)) :
ktime_set(0, 0);
+ rcu_read_unlock();
+
return info->status;
}
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 7eee3eb47a8e..40399c26e6be 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -109,29 +109,22 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
{
struct udmabuf *ubuf = buf->priv;
- unsigned long *pfns;
+ struct page **pages;
void *vaddr;
pgoff_t pg;
dma_resv_assert_held(buf->resv);
- /**
- * HVO may free tail pages, so just use pfn to map each folio
- * into vmalloc area.
- */
- pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
- if (!pfns)
+ pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
return -ENOMEM;
- for (pg = 0; pg < ubuf->pagecount; pg++) {
- unsigned long pfn = folio_pfn(ubuf->folios[pg]);
-
- pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
- pfns[pg] = pfn;
- }
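+ /* Look up the struct page of each folio at the buffer's offset within it */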
+ for (pg = 0; pg < ubuf->pagecount; pg++)
+ pages[pg] = folio_page(ubuf->folios[pg],
+ ubuf->offsets[pg] >> PAGE_SHIFT);
- vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
- kvfree(pfns);
+ vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
+ kvfree(pages);
if (!vaddr)
return -EINVAL;
@@ -264,8 +257,7 @@ static int begin_cpu_udmabuf(struct dma_buf *buf,
ubuf->sg = NULL;
}
} else {
- dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
- direction);
+ dma_sync_sgtable_for_cpu(dev, ubuf->sg, direction);
}
return ret;
@@ -280,7 +272,7 @@ static int end_cpu_udmabuf(struct dma_buf *buf,
if (!ubuf->sg)
return -EINVAL;
- dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+ dma_sync_sgtable_for_device(dev, ubuf->sg, direction);
return 0;
}
diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug
index fa6ee76f4d3c..05dc43c0b8c5 100644
--- a/drivers/gpu/drm/Kconfig.debug
+++ b/drivers/gpu/drm/Kconfig.debug
@@ -70,6 +70,7 @@ config DRM_KUNIT_TEST
select DRM_GEM_SHMEM_HELPER
select DRM_KUNIT_TEST_HELPERS
select DRM_LIB_RANDOM
+ select DRM_SYSFB_HELPER
select PRIME_NUMBERS
default KUNIT_ALL_TESTS
help
diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c
index 2b60128e2c69..cba7d32150a9 100644
--- a/drivers/gpu/drm/adp/adp-mipi.c
+++ b/drivers/gpu/drm/adp/adp-mipi.c
@@ -229,9 +229,10 @@ static int adp_mipi_probe(struct platform_device *pdev)
{
struct adp_mipi_drv_private *adp;
- adp = devm_kzalloc(&pdev->dev, sizeof(*adp), GFP_KERNEL);
- if (!adp)
- return -ENOMEM;
+ adp = devm_drm_bridge_alloc(&pdev->dev, struct adp_mipi_drv_private,
+ bridge, &adp_dsi_bridge_funcs);
+ if (IS_ERR(adp))
+ return PTR_ERR(adp);
adp->mipi = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(adp->mipi)) {
@@ -241,7 +242,6 @@ static int adp_mipi_probe(struct platform_device *pdev)
adp->dsi.dev = &pdev->dev;
adp->dsi.ops = &adp_dsi_host_ops;
- adp->bridge.funcs = &adp_dsi_bridge_funcs;
adp->bridge.of_node = pdev->dev.of_node;
adp->bridge.type = DRM_MODE_CONNECTOR_DSI;
dev_set_drvdata(&pdev->dev, adp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index d8ac4b1051a8..55161e5cdc30 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -642,7 +642,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
goto err;
}
- ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
+ ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job, 0);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9ea0d9b71f48..a2adaacf6adb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -293,7 +293,8 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
for (i = 0; i < p->gang_size; ++i) {
ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
- num_ibs[i], &p->jobs[i]);
+ num_ibs[i], &p->jobs[i],
+ p->filp->client_id);
if (ret)
goto free_all_kdata;
switch (p->adev->enforce_isolation[fpriv->xcp_id]) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 8e626f50b362..dac4b926e7be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1786,7 +1786,7 @@ static int amdgpu_debugfs_vm_info_show(struct seq_file *m, void *unused)
ti = amdgpu_vm_get_task_info_vm(vm);
if (ti) {
- seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->pid, ti->process_name);
+ seq_printf(m, "pid:%d\tProcess:%s ----------\n", ti->task.pid, ti->process_name);
amdgpu_vm_put_task_info(ti);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index 7b50741dc097..8a026bc9ea44 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -220,10 +220,10 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
drm_printf(&p, "time: %lld.%09ld\n", coredump->reset_time.tv_sec,
coredump->reset_time.tv_nsec);
- if (coredump->reset_task_info.pid)
+ if (coredump->reset_task_info.task.pid)
drm_printf(&p, "process_name: %s PID: %d\n",
coredump->reset_task_info.process_name,
- coredump->reset_task_info.pid);
+ coredump->reset_task_info.task.pid);
/* SOC Information */
drm_printf(&p, "\nSOC Information\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e1bab6a96cb6..a59f194e3360 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -6363,8 +6363,17 @@ end_reset:
atomic_set(&adev->reset_domain->reset_res, r);
- if (!r)
- drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
+ if (!r) {
+ struct amdgpu_task_info *ti = NULL;
+
+ if (job)
+ ti = amdgpu_vm_get_task_info_pasid(adev, job->pasid);
+
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE,
+ ti ? &ti->task : NULL);
+
+ amdgpu_vm_put_task_info(ti);
+ }
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
index 8b919ad3af29..23d7d0b0d625 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_eviction_fence.c
@@ -143,7 +143,6 @@ static bool amdgpu_eviction_fence_enable_signaling(struct dma_fence *f)
}
static const struct dma_fence_ops amdgpu_eviction_fence_ops = {
- .use_64bit_seqno = true,
.get_driver_name = amdgpu_eviction_fence_get_driver_name,
.get_timeline_name = amdgpu_eviction_fence_get_timeline_name,
.enable_signaling = amdgpu_eviction_fence_enable_signaling,
@@ -169,9 +168,9 @@ amdgpu_eviction_fence_create(struct amdgpu_eviction_fence_mgr *evf_mgr)
ev_fence->evf_mgr = evf_mgr;
get_task_comm(ev_fence->timeline_name, current);
spin_lock_init(&ev_fence->lock);
- dma_fence_init(&ev_fence->base, &amdgpu_eviction_fence_ops,
- &ev_fence->lock, evf_mgr->ev_fence_ctx,
- atomic_inc_return(&evf_mgr->ev_fence_seq));
+ dma_fence_init64(&ev_fence->base, &amdgpu_eviction_fence_ops,
+ &ev_fence->lock, evf_mgr->ev_fence_ctx,
+ atomic_inc_return(&evf_mgr->ev_fence_seq));
return ev_fence;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 0ecc88df7208..e5e33a68d935 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -329,7 +329,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj,
dev_warn(adev->dev, "validate_and_fence failed: %d\n", r);
if (ti) {
- dev_warn(adev->dev, "pid %d\n", ti->pid);
+ dev_warn(adev->dev, "pid %d\n", ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index acb21fc8b3ce..1e24590ae144 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -89,6 +89,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
+ struct drm_wedge_task_info *info = NULL;
struct amdgpu_task_info *ti;
struct amdgpu_device *adev = ring->adev;
int idx;
@@ -124,10 +125,8 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
if (ti) {
- dev_err(adev->dev,
- "Process information: process %s pid %d thread %s pid %d\n",
- ti->process_name, ti->tgid, ti->task_name, ti->pid);
- amdgpu_vm_put_task_info(ti);
+ amdgpu_vm_print_task_info(adev, ti);
+ info = &ti->task;
}
/* attempt a per ring reset */
@@ -166,13 +165,15 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
if (amdgpu_ring_sched_ready(ring))
drm_sched_start(&ring->sched, 0);
dev_err(adev->dev, "Ring %s reset succeeded\n", ring->sched.name);
- drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE);
+ drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, info);
goto exit;
}
dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name);
}
dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
+ amdgpu_vm_put_task_info(ti);
+
if (amdgpu_device_should_recover_gpu(ring->adev)) {
struct amdgpu_reset_context reset_context;
memset(&reset_context, 0, sizeof(reset_context));
@@ -204,7 +205,8 @@ exit:
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_sched_entity *entity, void *owner,
- unsigned int num_ibs, struct amdgpu_job **job)
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id)
{
if (num_ibs == 0)
return -EINVAL;
@@ -222,7 +224,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (!entity)
return 0;
- return drm_sched_job_init(&(*job)->base, entity, 1, owner);
+ return drm_sched_job_init(&(*job)->base, entity, 1, owner,
+ drm_client_id);
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
@@ -232,7 +235,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
{
int r;
- r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
+ r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, 0);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index f2c049129661..529045d60412 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -91,7 +91,8 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct drm_sched_entity *entity, void *owner,
- unsigned int num_ibs, struct amdgpu_job **job);
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
struct drm_sched_entity *entity, void *owner,
size_t size, enum amdgpu_ib_pool_type pool_type,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 11dd2e0f7979..d13e64a69e25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -167,25 +167,23 @@ TRACE_EVENT(amdgpu_cs_ioctl,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
+ __field(u64, context)
+ __field(u64, seqno)
__field(struct dma_fence *, fence)
__string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
__assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
__assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
__entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -193,24 +191,22 @@ TRACE_EVENT(amdgpu_sched_run_job,
TP_PROTO(struct amdgpu_job *job),
TP_ARGS(job),
TP_STRUCT__entry(
- __field(uint64_t, sched_job_id)
__string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job))
- __field(unsigned int, context)
- __field(unsigned int, seqno)
+ __field(u64, context)
+ __field(u64, seqno)
__string(ring, to_amdgpu_ring(job->base.sched)->name)
__field(u32, num_ibs)
),
TP_fast_assign(
- __entry->sched_job_id = job->base.id;
__assign_str(timeline);
__entry->context = job->base.s_fence->finished.context;
__entry->seqno = job->base.s_fence->finished.seqno;
__assign_str(ring);
__entry->num_ibs = job->num_ibs;
),
- TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u",
- __entry->sched_job_id, __get_str(timeline), __entry->context,
+ TP_printk("timeline=%s, fence=%llu:%llu, ring_name=%s, num_ibs=%u",
+ __get_str(timeline), __entry->context,
__entry->seqno, __get_str(ring), __entry->num_ibs)
);
@@ -551,23 +547,19 @@ TRACE_EVENT(amdgpu_ib_pipe_sync,
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
__string(ring, sched_job->base.sched->name)
- __field(uint64_t, id)
__field(struct dma_fence *, fence)
- __field(uint64_t, ctx)
- __field(unsigned, seqno)
+ __field(u64, ctx)
+ __field(u64, seqno)
),
TP_fast_assign(
__assign_str(ring);
- __entry->id = sched_job->base.id;
__entry->fence = fence;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
- TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u",
- __get_str(ring), __entry->id,
- __entry->fence, __entry->ctx,
- __entry->seqno)
+ TP_printk("job ring=%s need pipe sync to fence=%llu:%llu",
+ __get_str(ring), __entry->ctx, __entry->seqno)
);
TRACE_EVENT(amdgpu_reset_reg_dumps,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
index a86616c6deef..c2a983ff23c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
@@ -239,8 +239,8 @@ static int amdgpu_userq_fence_create(struct amdgpu_usermode_queue *userq,
fence = &userq_fence->base;
userq_fence->fence_drv = fence_drv;
- dma_fence_init(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
- fence_drv->context, seq);
+ dma_fence_init64(fence, &amdgpu_userq_fence_ops, &userq_fence->lock,
+ fence_drv->context, seq);
amdgpu_userq_fence_driver_get(fence_drv);
dma_fence_get(fence);
@@ -334,7 +334,6 @@ static void amdgpu_userq_fence_release(struct dma_fence *f)
}
static const struct dma_fence_ops amdgpu_userq_fence_ops = {
- .use_64bit_seqno = true,
.get_driver_name = amdgpu_userq_fence_get_driver_name,
.get_timeline_name = amdgpu_userq_fence_get_timeline_name,
.signaled = amdgpu_userq_fence_signaled,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 3911c78f8282..0ff95a56c2ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -622,7 +622,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
pr_warn_ratelimited("Evicted user BO is not reserved\n");
if (ti) {
- pr_warn_ratelimited("pid %d\n", ti->pid);
+ pr_warn_ratelimited("pid %d\n", ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
@@ -2447,7 +2447,8 @@ amdgpu_vm_get_vm_from_pasid(struct amdgpu_device *adev, u32 pasid)
*/
void amdgpu_vm_put_task_info(struct amdgpu_task_info *task_info)
{
- kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
+ if (task_info)
+ kref_put(&task_info->refcount, amdgpu_vm_destroy_task_info);
}
/**
@@ -2507,11 +2508,11 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
if (!vm->task_info)
return;
- if (vm->task_info->pid == current->pid)
+ if (vm->task_info->task.pid == current->pid)
return;
- vm->task_info->pid = current->pid;
- get_task_comm(vm->task_info->task_name, current);
+ vm->task_info->task.pid = current->pid;
+ get_task_comm(vm->task_info->task.comm, current);
if (current->group_leader->mm != current->mm)
return;
@@ -2774,7 +2775,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dev_warn(adev->dev,
"VM memory stats for proc %s(%d) task %s(%d) is non-zero when fini\n",
- ti->process_name, ti->pid, ti->task_name, ti->tgid);
+ ti->process_name, ti->task.pid, ti->task.comm, ti->tgid);
}
amdgpu_vm_put_task_info(vm->task_info);
@@ -3156,3 +3157,12 @@ bool amdgpu_vm_is_bo_always_valid(struct amdgpu_vm *vm, struct amdgpu_bo *bo)
{
return bo && bo->tbo.base.resv == vm->root.bo->tbo.base.resv;
}
+
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info)
+{
+ dev_err(adev->dev,
+ " Process %s pid %d thread %s pid %d\n",
+ task_info->process_name, task_info->tgid,
+ task_info->task.comm, task_info->task.pid);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index f3ad687125ad..fd086efd8457 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -236,9 +236,8 @@ struct amdgpu_vm_pte_funcs {
};
struct amdgpu_task_info {
+ struct drm_wedge_task_info task;
char process_name[TASK_COMM_LEN];
- char task_name[TASK_COMM_LEN];
- pid_t pid;
pid_t tgid;
struct kref refcount;
};
@@ -668,4 +667,7 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
+void amdgpu_vm_print_task_info(struct amdgpu_device *adev,
+ struct amdgpu_task_info *task_info);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
index 51cddfa3f1e8..5d26797356a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_tlb_fence.c
@@ -71,7 +71,6 @@ static void amdgpu_tlb_fence_work(struct work_struct *work)
}
static const struct dma_fence_ops amdgpu_tlb_fence_ops = {
- .use_64bit_seqno = true,
.get_driver_name = amdgpu_tlb_fence_get_driver_name,
.get_timeline_name = amdgpu_tlb_fence_get_timeline_name
};
@@ -101,8 +100,8 @@ void amdgpu_vm_tlb_fence_create(struct amdgpu_device *adev, struct amdgpu_vm *vm
INIT_WORK(&f->work, amdgpu_tlb_fence_work);
spin_lock_init(&f->lock);
- dma_fence_init(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
- vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
+ dma_fence_init64(&f->base, &amdgpu_tlb_fence_ops, &f->lock,
+ vm->tlb_fence_context, atomic64_read(&vm->tlb_seq));
/* TODO: We probably need a separate wq here */
dma_fence_get(&f->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index a3e2787501f1..7923f491cf73 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -164,10 +164,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 72211409227b..f15d691e9a20 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -134,10 +134,7 @@ static int gmc_v11_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
index b645d3e6a6c8..de763105fdfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
@@ -127,10 +127,7 @@ static int gmc_v12_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid, entry->pasid);
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " in process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 99ca08e9bdb5..b45fa0cea9d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1458,9 +1458,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev, " for process %s pid %d thread %s pid %d\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 282197f4ffb1..78f65aea03f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -641,10 +641,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
task_info = amdgpu_vm_get_task_info_pasid(adev, entry->pasid);
if (task_info) {
- dev_err(adev->dev,
- " for process %s pid %d thread %s pid %d)\n",
- task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ amdgpu_vm_print_task_info(adev, task_info);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 33ed2b158fcd..f38004e6064e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2187,7 +2187,7 @@ static int sdma_v4_0_print_iv_entry(struct amdgpu_device *adev,
dev_dbg_ratelimited(adev->dev,
" for process %s pid %d thread %s pid %d\n",
task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ task_info->task.comm, task_info->task.pid);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 9c169112a5e7..bcde34e4e0a1 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -1884,7 +1884,7 @@ static int sdma_v4_4_2_print_iv_entry(struct amdgpu_device *adev,
if (task_info) {
dev_dbg_ratelimited(adev->dev, " for process %s pid %d thread %s pid %d\n",
task_info->process_name, task_info->tgid,
- task_info->task_name, task_info->pid);
+ task_info->task.comm, task_info->task.pid);
amdgpu_vm_put_task_info(task_info);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 2b294ada3ec0..82905f3e54dd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -1302,7 +1302,7 @@ void kfd_signal_reset_event(struct kfd_node *dev)
if (ti) {
dev_err(dev->adev->dev,
"Queues reset on process %s tid %d thread %s pid %d\n",
- ti->process_name, ti->tgid, ti->task_name, ti->pid);
+ ti->process_name, ti->tgid, ti->task.comm, ti->task.pid);
amdgpu_vm_put_task_info(ti);
}
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
index 83d9384ac815..a499449fcb06 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
@@ -253,9 +253,9 @@ void kfd_smi_event_update_vmfault(struct kfd_node *dev, uint16_t pasid)
task_info = amdgpu_vm_get_task_info_pasid(dev->adev, pasid);
if (task_info) {
/* Report VM faults from user applications, not retry from kernel */
- if (task_info->pid)
+ if (task_info->task.pid)
kfd_smi_event_add(0, dev, KFD_SMI_EVENT_VMFAULT, KFD_EVENT_FMT_VMFAULT(
- task_info->pid, task_info->task_name));
+ task_info->task.pid, task_info->task.comm));
amdgpu_vm_put_task_info(task_info);
}
}
@@ -359,8 +359,8 @@ void kfd_smi_event_process(struct kfd_process_device *pdd, bool start)
kfd_smi_event_add(0, pdd->dev,
start ? KFD_SMI_EVENT_PROCESS_START :
KFD_SMI_EVENT_PROCESS_END,
- KFD_EVENT_FMT_PROCESS(task_info->pid,
- task_info->task_name));
+ KFD_EVENT_FMT_PROCESS(task_info->task.pid,
+ task_info->task.comm));
amdgpu_vm_put_task_info(task_info);
}
}
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 3cfefadc7c9d..806da0aaedf7 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -11,8 +11,8 @@
#include <linux/clk.h>
#include <linux/of_graph.h>
-#include <linux/platform_data/simplefb.h>
+#include <video/pixel_format.h>
#include <video/videomode.h>
#include <drm/drm_atomic.h>
@@ -73,7 +73,17 @@ static const struct drm_crtc_funcs hdlcd_crtc_funcs = {
.disable_vblank = hdlcd_crtc_disable_vblank,
};
-static struct simplefb_format supported_formats[] = SIMPLEFB_FORMATS;
+static const struct {
+ u32 fourcc;
+ struct pixel_format pixel;
+} supported_formats[] = {
+ { DRM_FORMAT_RGB565, PIXEL_FORMAT_RGB565 },
+ { DRM_FORMAT_XRGB1555, PIXEL_FORMAT_XRGB1555 },
+ { DRM_FORMAT_RGB888, PIXEL_FORMAT_RGB888 },
+ { DRM_FORMAT_XRGB8888, PIXEL_FORMAT_XRGB8888 },
+ { DRM_FORMAT_XBGR8888, PIXEL_FORMAT_XBGR8888 },
+ { DRM_FORMAT_XRGB2101010, PIXEL_FORMAT_XRGB2101010},
+};
/*
* Setup the HDLCD registers for decoding the pixels out of the framebuffer
@@ -83,15 +93,12 @@ static int hdlcd_set_pxl_fmt(struct drm_crtc *crtc)
unsigned int btpp;
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
const struct drm_framebuffer *fb = crtc->primary->state->fb;
- uint32_t pixel_format;
- struct simplefb_format *format = NULL;
+ const struct pixel_format *format = NULL;
int i;
- pixel_format = fb->format->format;
-
for (i = 0; i < ARRAY_SIZE(supported_formats); i++) {
- if (supported_formats[i].fourcc == pixel_format)
- format = &supported_formats[i];
+ if (supported_formats[i].fourcc == fb->format->format)
+ format = &supported_formats[i].pixel;
}
if (WARN_ON(!format))
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 6fbf62a99c48..054acda41909 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -64,7 +64,7 @@ static const struct drm_driver ast_driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_GEM_SHMEM_DRIVER_OPS_NO_MAP_SGT,
DRM_FBDEV_SHMEM_DRIVER_OPS,
};
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 1de832964e92..7908087bcb5a 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -34,6 +34,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_format_helper.h>
@@ -71,31 +72,44 @@ static unsigned long ast_fb_vram_size(struct ast_device *ast)
return cursor_offset - offset;
}
-static inline void ast_load_palette_index(struct ast_device *ast,
- u8 index, u8 red, u8 green,
- u8 blue)
+static void ast_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
{
- ast_io_write8(ast, AST_IO_VGADWR, index);
+ struct drm_device *dev = crtc->dev;
+ struct ast_device *ast = to_ast_device(dev);
+ u8 i8 = index & 0xff;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ ast_io_write8(ast, AST_IO_VGADWR, i8);
ast_io_read8(ast, AST_IO_VGASRI);
- ast_io_write8(ast, AST_IO_VGAPDR, red);
+ ast_io_write8(ast, AST_IO_VGAPDR, r8);
ast_io_read8(ast, AST_IO_VGASRI);
- ast_io_write8(ast, AST_IO_VGAPDR, green);
+ ast_io_write8(ast, AST_IO_VGAPDR, g8);
ast_io_read8(ast, AST_IO_VGASRI);
- ast_io_write8(ast, AST_IO_VGAPDR, blue);
+ ast_io_write8(ast, AST_IO_VGAPDR, b8);
ast_io_read8(ast, AST_IO_VGASRI);
}
-static void ast_crtc_set_gamma_linear(struct ast_device *ast,
- const struct drm_format_info *format)
+static void ast_crtc_fill_gamma(struct ast_device *ast,
+ const struct drm_format_info *format)
{
- int i;
+ struct drm_crtc *crtc = &ast->crtc;
switch (format->format) {
- case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
+ case DRM_FORMAT_C8:
+ /* gamma table is used as color palette */
+ drm_crtc_fill_palette_8(crtc, ast_set_gamma_lut);
+ break;
case DRM_FORMAT_RGB565:
+ /* also uses 8-bit gamma ramp on low-color modes */
+ fallthrough;
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < AST_LUT_SIZE; i++)
- ast_load_palette_index(ast, i, i, i, i);
+ drm_crtc_fill_gamma_888(crtc, ast_set_gamma_lut);
break;
default:
drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
@@ -104,21 +118,22 @@ static void ast_crtc_set_gamma_linear(struct ast_device *ast,
}
}
-static void ast_crtc_set_gamma(struct ast_device *ast,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+static void ast_crtc_load_gamma(struct ast_device *ast,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
- int i;
+ struct drm_crtc *crtc = &ast->crtc;
switch (format->format) {
- case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
+ case DRM_FORMAT_C8:
+ /* gamma table is used as color palette */
+ drm_crtc_load_palette_8(crtc, lut, ast_set_gamma_lut);
+ break;
case DRM_FORMAT_RGB565:
+ /* also uses 8-bit gamma ramp on low-color modes */
+ fallthrough;
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < AST_LUT_SIZE; i++)
- ast_load_palette_index(ast, i,
- lut[i].red >> 8,
- lut[i].green >> 8,
- lut[i].blue >> 8);
+ drm_crtc_load_gamma_888(crtc, lut, ast_set_gamma_lut);
break;
default:
drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
@@ -811,11 +826,11 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
*/
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
if (crtc_state->gamma_lut)
- ast_crtc_set_gamma(ast,
- ast_crtc_state->format,
- crtc_state->gamma_lut->data);
+ ast_crtc_load_gamma(ast,
+ ast_crtc_state->format,
+ crtc_state->gamma_lut->data);
else
- ast_crtc_set_gamma_linear(ast, ast_crtc_state->format);
+ ast_crtc_fill_gamma(ast, ast_crtc_state->format);
}
}
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index f46a5e26b5dd..59a5256ce8a6 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -5,6 +5,9 @@ config DRM_I2C_ADV7511
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_MIPI_DSI
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select DRM_DISPLAY_HDMI_STATE_HELPER
help
Support for the Analog Devices ADV7511(W)/13/33/35 HDMI encoders.
@@ -19,7 +22,7 @@ config DRM_I2C_ADV7511_AUDIO
config DRM_I2C_ADV7511_CEC
bool "ADV7511/33/35 HDMI CEC driver"
depends on DRM_I2C_ADV7511
- select CEC_CORE
+ select DRM_DISPLAY_HDMI_CEC_HELPER
default y
help
When selected the HDMI transmitter will support the CEC feature.
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index ec0b7f3d889c..71bb64e5f481 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -313,16 +313,11 @@ enum adv7511_csc_scaling {
* @csc_enable: Whether to enable color space conversion
* @csc_scaling_factor: Color space conversion scaling factor
* @csc_coefficents: Color space conversion coefficents
- * @hdmi_mode: Whether to use HDMI or DVI output mode
- * @avi_infoframe: HDMI infoframe
*/
struct adv7511_video_config {
bool csc_enable;
enum adv7511_csc_scaling csc_scaling_factor;
const uint16_t *csc_coefficents;
-
- bool hdmi_mode;
- struct hdmi_avi_infoframe avi_infoframe;
};
enum adv7511_type {
@@ -337,6 +332,7 @@ struct adv7511_chip_info {
enum adv7511_type type;
unsigned int max_mode_clock_khz;
unsigned int max_lane_freq_khz;
+ const char *name;
const char * const *supply_names;
unsigned int num_supplies;
unsigned int reg_cec_offset;
@@ -371,7 +367,7 @@ struct adv7511 {
struct work_struct hpd_work;
struct drm_bridge bridge;
- struct drm_connector connector;
+ struct drm_connector *cec_connector;
bool embedded_sync;
enum adv7511_sync_polarity vsync_polarity;
@@ -389,9 +385,7 @@ struct adv7511 {
bool use_timing_gen;
const struct adv7511_chip_info *info;
- struct platform_device *audio_pdev;
- struct cec_adapter *cec_adap;
u8 cec_addr[ADV7511_MAX_ADDRS];
u8 cec_valid_addrs;
bool cec_enabled_adap;
@@ -399,20 +393,29 @@ struct adv7511 {
u32 cec_clk_freq;
};
+static inline struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct adv7511, bridge);
+}
+
#ifdef CONFIG_DRM_I2C_ADV7511_CEC
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511);
+int adv7511_cec_init(struct drm_connector *connector,
+ struct drm_bridge *bridge);
+int adv7511_cec_enable(struct drm_bridge *bridge, bool enable);
+int adv7511_cec_log_addr(struct drm_bridge *bridge, u8 addr);
+int adv7511_cec_transmit(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg);
int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
#else
-static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
-{
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
- ADV7511_CEC_CTRL_POWER_DOWN);
- return 0;
-}
+#define adv7511_cec_init NULL
+#define adv7511_cec_enable NULL
+#define adv7511_cec_log_addr NULL
+#define adv7511_cec_transmit NULL
#endif
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
+void adv7533_dsi_config_timing_gen(struct adv7511 *adv);
enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
const struct drm_display_mode *mode);
int adv7533_patch_registers(struct adv7511 *adv);
@@ -421,16 +424,18 @@ int adv7533_attach_dsi(struct adv7511 *adv);
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
-int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511);
-void adv7511_audio_exit(struct adv7511 *adv7511);
+int adv7511_hdmi_audio_startup(struct drm_connector *connector,
+ struct drm_bridge *bridge);
+void adv7511_hdmi_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge);
+int adv7511_hdmi_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms);
#else /*CONFIG_DRM_I2C_ADV7511_AUDIO */
-static inline int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
-{
- return 0;
-}
-static inline void adv7511_audio_exit(struct adv7511 *adv7511)
-{
-}
+#define adv7511_hdmi_audio_startup NULL
+#define adv7511_hdmi_audio_shutdown NULL
+#define adv7511_hdmi_audio_prepare NULL
#endif /* CONFIG_DRM_I2C_ADV7511_AUDIO */
#endif /* __DRM_I2C_ADV7511_H__ */
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 1ff8c815ec79..915c3b967216 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -55,11 +55,12 @@ static int adv7511_update_cts_n(struct adv7511 *adv7511)
return 0;
}
-static int adv7511_hdmi_hw_params(struct device *dev, void *data,
- struct hdmi_codec_daifmt *fmt,
- struct hdmi_codec_params *hparms)
+int adv7511_hdmi_audio_prepare(struct drm_connector *connector,
+ struct drm_bridge *bridge,
+ struct hdmi_codec_daifmt *fmt,
+ struct hdmi_codec_params *hparms)
{
- struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int audio_source, i2s_format = 0;
unsigned int invert_clock;
unsigned int rate;
@@ -167,9 +168,10 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
return 0;
}
-static int audio_startup(struct device *dev, void *data)
+int adv7511_hdmi_audio_startup(struct drm_connector *connector,
+ struct drm_bridge *bridge)
{
- struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
BIT(7), 0);
@@ -204,69 +206,12 @@ static int audio_startup(struct device *dev, void *data)
return 0;
}
-static void audio_shutdown(struct device *dev, void *data)
+void adv7511_hdmi_audio_shutdown(struct drm_connector *connector,
+ struct drm_bridge *bridge)
{
- struct adv7511 *adv7511 = dev_get_drvdata(dev);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
BIT(7), 0);
}
-
-static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
- struct device_node *endpoint,
- void *data)
-{
- struct of_endpoint of_ep;
- int ret;
-
- ret = of_graph_parse_endpoint(endpoint, &of_ep);
- if (ret < 0)
- return ret;
-
- /*
- * HDMI sound should be located as reg = <2>
- * Then, it is sound port 0
- */
- if (of_ep.port == 2)
- return 0;
-
- return -EINVAL;
-}
-
-static const struct hdmi_codec_ops adv7511_codec_ops = {
- .hw_params = adv7511_hdmi_hw_params,
- .audio_shutdown = audio_shutdown,
- .audio_startup = audio_startup,
- .get_dai_id = adv7511_hdmi_i2s_get_dai_id,
-};
-
-static const struct hdmi_codec_pdata codec_data = {
- .ops = &adv7511_codec_ops,
- .i2s_formats = (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |
- SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE |
- SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE),
- .max_i2s_channels = 2,
- .i2s = 1,
- .no_i2s_capture = 1,
- .spdif = 1,
- .no_spdif_capture = 1,
-};
-
-int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
-{
- adv7511->audio_pdev = platform_device_register_data(dev,
- HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &codec_data,
- sizeof(codec_data));
- return PTR_ERR_OR_ZERO(adv7511->audio_pdev);
-}
-
-void adv7511_audio_exit(struct adv7511 *adv7511)
-{
- if (adv7511->audio_pdev) {
- platform_device_unregister(adv7511->audio_pdev);
- adv7511->audio_pdev = NULL;
- }
-}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index 2e9c88a2b5ed..822265426f58 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -12,6 +12,8 @@
#include <media/cec.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
+
#include "adv7511.h"
static const u8 ADV7511_REG_CEC_RX_FRAME_HDR[] = {
@@ -44,8 +46,8 @@ static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
return;
if (tx_raw_status & ADV7511_INT1_CEC_TX_ARBIT_LOST) {
- cec_transmit_attempt_done(adv7511->cec_adap,
- CEC_TX_STATUS_ARB_LOST);
+ drm_connector_hdmi_cec_transmit_attempt_done(adv7511->cec_connector,
+ CEC_TX_STATUS_ARB_LOST);
return;
}
if (tx_raw_status & ADV7511_INT1_CEC_TX_RETRY_TIMEOUT) {
@@ -72,12 +74,14 @@ static void adv_cec_tx_raw_status(struct adv7511 *adv7511, u8 tx_raw_status)
if (low_drive_cnt)
status |= CEC_TX_STATUS_LOW_DRIVE;
}
- cec_transmit_done(adv7511->cec_adap, status,
- 0, nack_cnt, low_drive_cnt, err_cnt);
+ drm_connector_hdmi_cec_transmit_done(adv7511->cec_connector, status,
+ 0, nack_cnt, low_drive_cnt,
+ err_cnt);
return;
}
if (tx_raw_status & ADV7511_INT1_CEC_TX_READY) {
- cec_transmit_attempt_done(adv7511->cec_adap, CEC_TX_STATUS_OK);
+ drm_connector_hdmi_cec_transmit_attempt_done(adv7511->cec_connector,
+ CEC_TX_STATUS_OK);
return;
}
}
@@ -116,7 +120,7 @@ static void adv7511_cec_rx(struct adv7511 *adv7511, int rx_buf)
regmap_update_bits(adv7511->regmap_cec,
ADV7511_REG_CEC_RX_BUFFERS + offset, BIT(rx_buf), 0);
- cec_received_msg(adv7511->cec_adap, &msg);
+ drm_connector_hdmi_cec_received_msg(adv7511->cec_connector, &msg);
}
int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
@@ -179,9 +183,9 @@ int adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1)
return IRQ_HANDLED;
}
-static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
+int adv7511_cec_enable(struct drm_bridge *bridge, bool enable)
{
- struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int offset = adv7511->info->reg_cec_offset;
if (adv7511->i2c_cec == NULL)
@@ -225,9 +229,9 @@ static int adv7511_cec_adap_enable(struct cec_adapter *adap, bool enable)
return 0;
}
-static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
+int adv7511_cec_log_addr(struct drm_bridge *bridge, u8 addr)
{
- struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int offset = adv7511->info->reg_cec_offset;
unsigned int i, free_idx = ADV7511_MAX_ADDRS;
@@ -293,10 +297,10 @@ static int adv7511_cec_adap_log_addr(struct cec_adapter *adap, u8 addr)
return 0;
}
-static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
- u32 signal_free_time, struct cec_msg *msg)
+int adv7511_cec_transmit(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
{
- struct adv7511 *adv7511 = cec_get_drvdata(adap);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
unsigned int offset = adv7511->info->reg_cec_offset;
u8 len = msg->len;
unsigned int i;
@@ -328,12 +332,6 @@ static int adv7511_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
return 0;
}
-static const struct cec_adap_ops adv7511_cec_adap_ops = {
- .adap_enable = adv7511_cec_adap_enable,
- .adap_log_addr = adv7511_cec_adap_log_addr,
- .adap_transmit = adv7511_cec_adap_transmit,
-};
-
static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
{
adv7511->cec_clk = devm_clk_get(dev, "cec");
@@ -348,20 +346,18 @@ static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511)
return 0;
}
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+int adv7511_cec_init(struct drm_connector *connector,
+ struct drm_bridge *bridge)
{
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
+ struct device *dev = &adv7511->i2c_main->dev;
unsigned int offset = adv7511->info->reg_cec_offset;
int ret = adv7511_cec_parse_dt(dev, adv7511);
if (ret)
goto err_cec_parse_dt;
- adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
- adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
- if (IS_ERR(adv7511->cec_adap)) {
- ret = PTR_ERR(adv7511->cec_adap);
- goto err_cec_alloc;
- }
+ adv7511->cec_connector = connector;
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL, 0);
/* cec soft reset */
@@ -378,17 +374,8 @@ int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
ADV7511_REG_CEC_CLK_DIV + offset,
((adv7511->cec_clk_freq / 750000) - 1) << 2);
- ret = cec_register_adapter(adv7511->cec_adap, dev);
- if (ret)
- goto err_cec_register;
return 0;
-err_cec_register:
- cec_delete_adapter(adv7511->cec_adap);
- adv7511->cec_adap = NULL;
-err_cec_alloc:
- dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
- ret);
err_cec_parse_dt:
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
ADV7511_CEC_CTRL_POWER_DOWN);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 1257009e850c..9df18a8f2e37 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -12,14 +12,17 @@
#include <linux/of.h>
#include <linux/slab.h>
-#include <media/cec.h>
+#include <sound/pcm.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/display/drm_hdmi_helper.h>
+#include <drm/display/drm_hdmi_state_helper.h>
#include "adv7511.h"
@@ -203,62 +206,37 @@ static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
static void adv7511_set_config_csc(struct adv7511 *adv7511,
struct drm_connector *connector,
- bool rgb, bool hdmi_mode)
+ bool rgb)
{
struct adv7511_video_config config;
bool output_format_422, output_format_ycbcr;
unsigned int mode;
- uint8_t infoframe[17];
-
- config.hdmi_mode = hdmi_mode;
-
- hdmi_avi_infoframe_init(&config.avi_infoframe);
-
- config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
if (rgb) {
config.csc_enable = false;
- config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
+ output_format_422 = false;
+ output_format_ycbcr = false;
} else {
config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;
if ((connector->display_info.color_formats &
DRM_COLOR_FORMAT_YCBCR422) &&
- config.hdmi_mode) {
+ connector->display_info.is_hdmi) {
config.csc_enable = false;
- config.avi_infoframe.colorspace =
- HDMI_COLORSPACE_YUV422;
- } else {
- config.csc_enable = true;
- config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
- }
- }
-
- if (config.hdmi_mode) {
- mode = ADV7511_HDMI_CFG_MODE_HDMI;
-
- switch (config.avi_infoframe.colorspace) {
- case HDMI_COLORSPACE_YUV444:
- output_format_422 = false;
- output_format_ycbcr = true;
- break;
- case HDMI_COLORSPACE_YUV422:
output_format_422 = true;
output_format_ycbcr = true;
- break;
- default:
+ } else {
+ config.csc_enable = true;
output_format_422 = false;
output_format_ycbcr = false;
- break;
}
- } else {
- mode = ADV7511_HDMI_CFG_MODE_DVI;
- output_format_422 = false;
- output_format_ycbcr = false;
}
- adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+ if (connector->display_info.is_hdmi)
+ mode = ADV7511_HDMI_CFG_MODE_HDMI;
+ else
+ mode = ADV7511_HDMI_CFG_MODE_DVI;
adv7511_set_colormap(adv7511, config.csc_enable,
config.csc_coefficents,
@@ -269,15 +247,6 @@ static void adv7511_set_config_csc(struct adv7511 *adv7511,
regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
ADV7511_HDMI_CFG_MODE_MASK, mode);
-
- hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
- sizeof(infoframe));
-
- /* The AVI infoframe id is not configurable */
- regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
- infoframe + 1, sizeof(infoframe) - 1);
-
- adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
}
static void adv7511_set_link_config(struct adv7511 *adv7511,
@@ -446,22 +415,16 @@ static void adv7511_hpd_work(struct work_struct *work)
* restore its state.
*/
if (status == connector_status_connected &&
- adv7511->connector.status == connector_status_disconnected &&
+ adv7511->status == connector_status_disconnected &&
adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
}
- if (adv7511->connector.status != status) {
- adv7511->connector.status = status;
+ if (adv7511->status != status) {
+ adv7511->status = status;
- if (adv7511->connector.dev) {
- if (status == connector_status_disconnected)
- cec_phys_addr_invalidate(adv7511->cec_adap);
- drm_kms_helper_hotplug_event(adv7511->connector.dev);
- } else {
- drm_bridge_hpd_notify(&adv7511->bridge, status);
- }
+ drm_bridge_hpd_notify(&adv7511->bridge, status);
}
}
@@ -636,45 +599,11 @@ static const struct drm_edid *adv7511_edid_read(struct adv7511 *adv7511,
if (!adv7511->powered)
__adv7511_power_off(adv7511);
- if (drm_edid) {
- /*
- * FIXME: The CEC physical address should be set using
- * cec_s_phys_addr(adap,
- * connector->display_info.source_physical_address, false) from
- * a path that has read the EDID and called
- * drm_edid_connector_update().
- */
- const struct edid *edid = drm_edid_raw(drm_edid);
-
- adv7511_set_config_csc(adv7511, connector, adv7511->rgb,
- drm_detect_hdmi_monitor(edid));
-
- cec_s_phys_addr_from_edid(adv7511->cec_adap, edid);
- } else {
- cec_s_phys_addr_from_edid(adv7511->cec_adap, NULL);
- }
-
return drm_edid;
}
-static int adv7511_get_modes(struct adv7511 *adv7511,
- struct drm_connector *connector)
-{
- const struct drm_edid *drm_edid;
- unsigned int count;
-
- drm_edid = adv7511_edid_read(adv7511, connector);
-
- drm_edid_connector_update(connector, drm_edid);
- count = drm_edid_connector_add_modes(connector);
-
- drm_edid_free(drm_edid);
-
- return count;
-}
-
static enum drm_connector_status
-adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
+adv7511_detect(struct adv7511 *adv7511)
{
enum drm_connector_status status;
unsigned int val;
@@ -699,8 +628,6 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
if (status == connector_status_connected && hpd && adv7511->powered) {
regcache_mark_dirty(adv7511->regmap);
adv7511_power_on(adv7511);
- if (connector)
- adv7511_get_modes(adv7511, connector);
if (adv7511->status == connector_status_connected)
status = connector_status_disconnected;
} else {
@@ -719,17 +646,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
return status;
}
-static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
- const struct drm_display_mode *mode)
-{
- if (mode->clock > 165000)
- return MODE_CLOCK_HIGH;
-
- return MODE_OK;
-}
-
static void adv7511_mode_set(struct adv7511 *adv7511,
- const struct drm_display_mode *mode,
const struct drm_display_mode *adj_mode)
{
unsigned int low_refresh_rate;
@@ -800,11 +717,11 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
vsync_polarity = 1;
}
- if (drm_mode_vrefresh(mode) <= 24)
+ if (drm_mode_vrefresh(adj_mode) <= 24)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
- else if (drm_mode_vrefresh(mode) <= 25)
+ else if (drm_mode_vrefresh(adj_mode) <= 25)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
- else if (drm_mode_vrefresh(mode) <= 30)
+ else if (drm_mode_vrefresh(adj_mode) <= 30)
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
else
low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
@@ -821,82 +738,30 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
drm_mode_copy(&adv7511->curr_mode, adj_mode);
+ /* Update horizontal/vertical porch params */
+ if (adv7511->info->has_dsi && adv7511->use_timing_gen)
+ adv7533_dsi_config_timing_gen(adv7511);
+
/*
* TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
* supposed to give better results.
*/
- adv7511->f_tmds = mode->clock;
-}
-
-/* -----------------------------------------------------------------------------
- * DRM Connector Operations
- */
-
-static struct adv7511 *connector_to_adv7511(struct drm_connector *connector)
-{
- return container_of(connector, struct adv7511, connector);
+ adv7511->f_tmds = adj_mode->clock;
}
-static int adv7511_connector_get_modes(struct drm_connector *connector)
-{
- struct adv7511 *adv = connector_to_adv7511(connector);
-
- return adv7511_get_modes(adv, connector);
-}
-
-static enum drm_mode_status
-adv7511_connector_mode_valid(struct drm_connector *connector,
- const struct drm_display_mode *mode)
-{
- struct adv7511 *adv = connector_to_adv7511(connector);
-
- return adv7511_mode_valid(adv, mode);
-}
-
-static struct drm_connector_helper_funcs adv7511_connector_helper_funcs = {
- .get_modes = adv7511_connector_get_modes,
- .mode_valid = adv7511_connector_mode_valid,
-};
-
-static enum drm_connector_status
-adv7511_connector_detect(struct drm_connector *connector, bool force)
-{
- struct adv7511 *adv = connector_to_adv7511(connector);
-
- return adv7511_detect(adv, connector);
-}
-
-static const struct drm_connector_funcs adv7511_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = adv7511_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
static int adv7511_connector_init(struct adv7511 *adv)
{
struct drm_bridge *bridge = &adv->bridge;
- int ret;
-
- if (adv->i2c_main->irq)
- adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
- else
- adv->connector.polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ struct drm_connector *connector;
- ret = drm_connector_init(bridge->dev, &adv->connector,
- &adv7511_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA);
- if (ret < 0) {
+ connector = drm_bridge_connector_init(bridge->dev, bridge->encoder);
+ if (IS_ERR(connector)) {
DRM_ERROR("Failed to initialize connector with drm\n");
- return ret;
+ return PTR_ERR(connector);
}
- drm_connector_helper_add(&adv->connector,
- &adv7511_connector_helper_funcs);
- drm_connector_attach_encoder(&adv->connector, bridge->encoder);
+
+ drm_connector_attach_encoder(connector, bridge->encoder);
return 0;
}
@@ -905,7 +770,7 @@ static int adv7511_connector_init(struct adv7511 *adv)
* DRM Bridge Operations
*/
-static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
+static const struct adv7511 *bridge_to_adv7511_const(const struct drm_bridge *bridge)
{
return container_of(bridge, struct adv7511, bridge);
}
@@ -914,8 +779,29 @@ static void adv7511_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
+ struct drm_connector *connector;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
adv7511_power_on(adv);
+
+ connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ adv7511_set_config_csc(adv, connector, adv->rgb);
+
+ adv7511_mode_set(adv, &crtc_state->adjusted_mode);
+
+ drm_atomic_helper_connector_hdmi_update_infoframes(connector, state);
}
static void adv7511_bridge_atomic_disable(struct drm_bridge *bridge,
@@ -926,13 +812,17 @@ static void adv7511_bridge_atomic_disable(struct drm_bridge *bridge,
adv7511_power_off(adv);
}
-static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adj_mode)
+static enum drm_mode_status
+adv7511_bridge_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate)
{
- struct adv7511 *adv = bridge_to_adv7511(bridge);
+ const struct adv7511 *adv = bridge_to_adv7511_const(bridge);
- adv7511_mode_set(adv, mode, adj_mode);
+ if (tmds_rate > 1000ULL * adv->info->max_mode_clock_khz)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
}
static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
@@ -941,10 +831,10 @@ static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
- if (adv->info->has_dsi)
- return adv7533_mode_valid(adv, mode);
- else
- return adv7511_mode_valid(adv, mode);
+ if (!adv->info->has_dsi)
+ return MODE_OK;
+
+ return adv7533_mode_valid(adv, mode);
}
static int adv7511_bridge_attach(struct drm_bridge *bridge,
@@ -978,7 +868,7 @@ static enum drm_connector_status adv7511_bridge_detect(struct drm_bridge *bridge
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
- return adv7511_detect(adv, NULL);
+ return adv7511_detect(adv);
}
static const struct drm_edid *adv7511_bridge_edid_read(struct drm_bridge *bridge,
@@ -989,28 +879,71 @@ static const struct drm_edid *adv7511_bridge_edid_read(struct drm_bridge *bridge
return adv7511_edid_read(adv, connector);
}
-static void adv7511_bridge_hpd_notify(struct drm_bridge *bridge,
- enum drm_connector_status status)
+static int adv7511_bridge_hdmi_clear_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type)
{
- struct adv7511 *adv = bridge_to_adv7511(bridge);
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+ break;
+ default:
+ drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
+ break;
+ }
- if (status == connector_status_disconnected)
- cec_phys_addr_invalidate(adv->cec_adap);
+ return 0;
+}
+
+static int adv7511_bridge_hdmi_write_infoframe(struct drm_bridge *bridge,
+ enum hdmi_infoframe_type type,
+ const u8 *buffer, size_t len)
+{
+ struct adv7511 *adv7511 = bridge_to_adv7511(bridge);
+
+ adv7511_bridge_hdmi_clear_infoframe(bridge, type);
+
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ /* The AVI infoframe id is not configurable */
+ regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
+ buffer + 1, len - 1);
+
+ adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
+ break;
+ default:
+ drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type);
+ break;
+ }
+
+ return 0;
}
static const struct drm_bridge_funcs adv7511_bridge_funcs = {
- .mode_set = adv7511_bridge_mode_set,
.mode_valid = adv7511_bridge_mode_valid,
.attach = adv7511_bridge_attach,
.detect = adv7511_bridge_detect,
.edid_read = adv7511_bridge_edid_read,
- .hpd_notify = adv7511_bridge_hpd_notify,
.atomic_enable = adv7511_bridge_atomic_enable,
.atomic_disable = adv7511_bridge_atomic_disable,
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
+
+ .hdmi_tmds_char_rate_valid = adv7511_bridge_hdmi_tmds_char_rate_valid,
+ .hdmi_clear_infoframe = adv7511_bridge_hdmi_clear_infoframe,
+ .hdmi_write_infoframe = adv7511_bridge_hdmi_write_infoframe,
+
+ .hdmi_audio_startup = adv7511_hdmi_audio_startup,
+ .hdmi_audio_prepare = adv7511_hdmi_audio_prepare,
+ .hdmi_audio_shutdown = adv7511_hdmi_audio_shutdown,
+
+ .hdmi_cec_init = adv7511_cec_init,
+ .hdmi_cec_enable = adv7511_cec_enable,
+ .hdmi_cec_log_addr = adv7511_cec_log_addr,
+ .hdmi_cec_transmit = adv7511_cec_transmit,
};
/* -----------------------------------------------------------------------------
@@ -1224,9 +1157,10 @@ static int adv7511_probe(struct i2c_client *i2c)
if (!dev->of_node)
return -EINVAL;
- adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL);
- if (!adv7511)
- return -ENOMEM;
+ adv7511 = devm_drm_bridge_alloc(dev, struct adv7511, bridge,
+ &adv7511_bridge_funcs);
+ if (IS_ERR(adv7511))
+ return PTR_ERR(adv7511);
adv7511->i2c_main = i2c;
adv7511->powered = false;
@@ -1323,22 +1257,43 @@ static int adv7511_probe(struct i2c_client *i2c)
if (adv7511->info->link_config)
adv7511_set_link_config(adv7511, &link_config);
- ret = adv7511_cec_init(dev, adv7511);
- if (ret)
- goto err_unregister_cec;
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
+ ADV7511_CEC_CTRL_POWER_DOWN);
- adv7511->bridge.funcs = &adv7511_bridge_funcs;
- adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
+ adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT |
+ DRM_BRIDGE_OP_EDID |
+ DRM_BRIDGE_OP_HDMI |
+ DRM_BRIDGE_OP_HDMI_AUDIO |
+ DRM_BRIDGE_OP_HDMI_CEC_ADAPTER;
if (adv7511->i2c_main->irq)
adv7511->bridge.ops |= DRM_BRIDGE_OP_HPD;
+ adv7511->bridge.vendor = "Analog";
+ adv7511->bridge.product = adv7511->info->name;
+
+#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
+ adv7511->bridge.hdmi_audio_dev = dev;
+ adv7511->bridge.hdmi_audio_max_i2s_playback_channels = 2;
+ adv7511->bridge.hdmi_audio_i2s_formats = (SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S20_3LE |
+ SNDRV_PCM_FMTBIT_S24_3LE |
+ SNDRV_PCM_FMTBIT_S24_LE |
+ SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE);
+ adv7511->bridge.hdmi_audio_spdif_playback = 1;
+ adv7511->bridge.hdmi_audio_dai_port = 2;
+#endif
+
+#ifdef CONFIG_DRM_I2C_ADV7511_CEC
+ adv7511->bridge.hdmi_cec_dev = dev;
+ adv7511->bridge.hdmi_cec_adapter_name = dev_name(dev);
+ adv7511->bridge.hdmi_cec_available_las = ADV7511_MAX_ADDRS;
+#endif
+
adv7511->bridge.of_node = dev->of_node;
adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
drm_bridge_add(&adv7511->bridge);
- adv7511_audio_init(dev, adv7511);
-
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
@@ -1360,10 +1315,7 @@ static int adv7511_probe(struct i2c_client *i2c)
return 0;
err_unregister_audio:
- adv7511_audio_exit(adv7511);
drm_bridge_remove(&adv7511->bridge);
-err_unregister_cec:
- cec_unregister_adapter(adv7511->cec_adap);
i2c_unregister_device(adv7511->i2c_cec);
clk_disable_unprepare(adv7511->cec_clk);
err_i2c_unregister_packet:
@@ -1388,9 +1340,6 @@ static void adv7511_remove(struct i2c_client *i2c)
drm_bridge_remove(&adv7511->bridge);
- adv7511_audio_exit(adv7511);
-
- cec_unregister_adapter(adv7511->cec_adap);
i2c_unregister_device(adv7511->i2c_cec);
clk_disable_unprepare(adv7511->cec_clk);
@@ -1400,6 +1349,8 @@ static void adv7511_remove(struct i2c_client *i2c)
static const struct adv7511_chip_info adv7511_chip_info = {
.type = ADV7511,
+ .name = "ADV7511",
+ .max_mode_clock_khz = 165000,
.supply_names = adv7511_supply_names,
.num_supplies = ARRAY_SIZE(adv7511_supply_names),
.link_config = true,
@@ -1407,6 +1358,7 @@ static const struct adv7511_chip_info adv7511_chip_info = {
static const struct adv7511_chip_info adv7533_chip_info = {
.type = ADV7533,
+ .name = "ADV7533",
.max_mode_clock_khz = 80000,
.max_lane_freq_khz = 800000,
.supply_names = adv7533_supply_names,
@@ -1417,6 +1369,7 @@ static const struct adv7511_chip_info adv7533_chip_info = {
static const struct adv7511_chip_info adv7535_chip_info = {
.type = ADV7535,
+ .name = "ADV7535",
.max_mode_clock_khz = 148500,
.max_lane_freq_khz = 891000,
.supply_names = adv7533_supply_names,
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
index 122ad91e8a32..188c1093a66e 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -24,7 +24,7 @@ static const struct reg_sequence adv7533_cec_fixed_registers[] = {
{ 0x05, 0xc8 },
};
-static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
+void adv7533_dsi_config_timing_gen(struct adv7511 *adv)
{
struct mipi_dsi_device *dsi = adv->dsi;
struct drm_display_mode *mode = &adv->curr_mode;
@@ -67,9 +67,6 @@ void adv7533_dsi_power_on(struct adv7511 *adv)
{
struct mipi_dsi_device *dsi = adv->dsi;
- if (adv->use_timing_gen)
- adv7511_dsi_config_timing_gen(adv);
-
/* set number of dsi lanes */
regmap_write(adv->regmap_cec, 0x1c, dsi->lanes << 4);
@@ -106,10 +103,6 @@ enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
struct mipi_dsi_device *dsi = adv->dsi;
u8 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
- /* Check max clock for either 7533 or 7535 */
- if (mode->clock > adv->info->max_mode_clock_khz)
- return MODE_CLOCK_HIGH;
-
/* Check max clock for each lane */
if (mode->clock * bpp > adv->info->max_lane_freq_khz * adv->num_dsi_lanes)
return MODE_CLOCK_HIGH;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index a83020d6576f..ba0fc149a9e7 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -1193,9 +1193,10 @@ static int anx78xx_i2c_probe(struct i2c_client *client)
bool found = false;
int err;
- anx78xx = devm_kzalloc(&client->dev, sizeof(*anx78xx), GFP_KERNEL);
- if (!anx78xx)
- return -ENOMEM;
+ anx78xx = devm_drm_bridge_alloc(&client->dev, struct anx78xx, bridge,
+ &anx78xx_bridge_funcs);
+ if (IS_ERR(anx78xx))
+ return PTR_ERR(anx78xx);
pdata = &anx78xx->pdata;
@@ -1306,8 +1307,6 @@ static int anx78xx_i2c_probe(struct i2c_client *client)
goto err_poweroff;
}
- anx78xx->bridge.funcs = &anx78xx_bridge_funcs;
-
drm_bridge_add(&anx78xx->bridge);
/* If cable is pulled out, just poweroff and wait for HPD event */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
index b1e482994ffe..e8662168717d 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-i2c-dptx.c
@@ -5,6 +5,8 @@
* Based on anx7808 driver obtained from chromeos with copyright:
* Copyright(c) 2013, Google Inc.
*/
+
+#include <linux/export.h>
#include <linux/regmap.h>
#include <drm/display/drm_dp_helper.h>
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 505eec6b819b..a1bc3e96dd35 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 8a9079c2ed5c..0ac4a82c5a6e 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -2596,7 +2596,6 @@ static int anx7625_link_bridge(struct drm_dp_aux *aux)
return ret;
}
- platform->bridge.funcs = &anx7625_bridge_funcs;
platform->bridge.of_node = dev->of_node;
if (!anx7625_of_panel_on_aux_bus(dev))
platform->bridge.ops |= DRM_BRIDGE_OP_EDID;
@@ -2630,10 +2629,10 @@ static int anx7625_i2c_probe(struct i2c_client *client)
return -ENODEV;
}
- platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL);
- if (!platform) {
+ platform = devm_drm_bridge_alloc(dev, struct anx7625_data, bridge, &anx7625_bridge_funcs);
+ if (IS_ERR(platform)) {
DRM_DEV_ERROR(dev, "fail to allocate driver data\n");
- return -ENOMEM;
+ return PTR_ERR(platform);
}
pdata = &platform->pdata;
diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c
index c179b86d208f..b63304d3a80f 100644
--- a/drivers/gpu/drm/bridge/aux-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-bridge.c
@@ -5,6 +5,7 @@
* Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
*/
#include <linux/auxiliary_bus.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -109,9 +110,10 @@ static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
{
struct drm_aux_bridge_data *data;
- data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ data = devm_drm_bridge_alloc(&auxdev->dev, struct drm_aux_bridge_data,
+ bridge, &drm_aux_bridge_funcs);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
data->dev = &auxdev->dev;
data->next_bridge = devm_drm_of_get_bridge(&auxdev->dev, auxdev->dev.of_node, 0, 0);
@@ -119,7 +121,6 @@ static int drm_aux_bridge_probe(struct auxiliary_device *auxdev,
return dev_err_probe(&auxdev->dev, PTR_ERR(data->next_bridge),
"failed to acquire drm_bridge\n");
- data->bridge.funcs = &drm_aux_bridge_funcs;
data->bridge.of_node = data->dev->of_node;
/* passthrough data, allow everything */
diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
index b3f588b71a7d..e579f947e15b 100644
--- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c
+++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c
@@ -5,6 +5,7 @@
* Author: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
*/
#include <linux/auxiliary_bus.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -171,12 +172,13 @@ static int drm_aux_hpd_bridge_probe(struct auxiliary_device *auxdev,
{
struct drm_aux_hpd_bridge_data *data;
- data = devm_kzalloc(&auxdev->dev, sizeof(*data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ data = devm_drm_bridge_alloc(&auxdev->dev,
+ struct drm_aux_hpd_bridge_data, bridge,
+ &drm_aux_hpd_bridge_funcs);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
data->dev = &auxdev->dev;
- data->bridge.funcs = &drm_aux_hpd_bridge_funcs;
data->bridge.of_node = dev_get_platdata(data->dev);
data->bridge.ops = DRM_BRIDGE_OP_HPD;
data->bridge.type = id->driver_data;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
index b022dd6e6b6e..a57ca8c3bdae 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c
@@ -670,13 +670,28 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
-static void cdns_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void cdns_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
u32 val;
+ /*
+ * The cdns-dsi controller needs to be disabled after its DPI source
+ * has stopped streaming. If this is not followed, there is a brief
+ * window before DPI source is disabled and after cdns-dsi controller
+ * has been disabled where the DPI stream is still on, but the cdns-dsi
+ * controller is no longer ready to accept the incoming signals. This
+ * is one of the reasons why a shift in pixel colors is observed on
+ * displays that have cdns-dsi as one of the bridges.
+ *
+ * To mitigate this, disable this bridge from the bridge post_disable()
+ * hook, instead of the bridge _disable() hook. The bridge post_disable()
+ * hook gets called after the CRTC disable, where often many DPI sources
+ * disable their streams.
+ */
+
val = readl(dsi->regs + MCTL_MAIN_DATA_CTL);
val &= ~(IF_VID_SELECT_MASK | IF_VID_MODE | VID_EN | HOST_EOT_GEN |
DISP_EOT_GEN);
@@ -688,15 +703,6 @@ static void cdns_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
if (dsi->platform_ops && dsi->platform_ops->disable)
dsi->platform_ops->disable(dsi);
- pm_runtime_put(dsi->base.dev);
-}
-
-static void cdns_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
-{
- struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
- struct cdns_dsi *dsi = input_to_dsi(input);
-
dsi->phy_initialized = false;
dsi->link_initialized = false;
phy_power_off(dsi->dphy);
@@ -774,8 +780,8 @@ static void cdns_dsi_init_link(struct cdns_dsi *dsi)
dsi->link_initialized = true;
}
-static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
@@ -792,6 +798,21 @@ static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
u32 tmp, reg_wakeup, div, status;
int nlanes;
+ /*
+ * The cdns-dsi controller needs to be enabled before its DPI source
+ * has begun streaming. If this is not followed, there is a brief window
+ * after DPI source enable and before cdns-dsi controller enable where
+ * the DPI stream is on, but the cdns-dsi controller is not ready to
+ * accept the incoming signals. This is one of the reasons why a shift
+ * in pixel colors is observed on displays that have cdns-dsi as one of
+ * the bridges.
+ *
+ * To mitigate this, enable this bridge from the bridge pre_enable()
+ * hook, instead of the bridge _enable() hook. The bridge pre_enable()
+ * hook gets called before the CRTC enable, where often many DPI sources
+ * enable their streams.
+ */
+
if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
return;
@@ -811,8 +832,8 @@ static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
mode = &crtc_state->adjusted_mode;
nlanes = output->dev->lanes;
- cdns_dsi_hs_init(dsi);
cdns_dsi_init_link(dsi);
+ cdns_dsi_hs_init(dsi);
/*
* Now that the DSI Link and DSI Phy are initialized,
@@ -941,19 +962,6 @@ static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
writel(tmp, dsi->regs + MCTL_MAIN_EN);
}
-static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
-{
- struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
- struct cdns_dsi *dsi = input_to_dsi(input);
-
- if (WARN_ON(pm_runtime_get_sync(dsi->base.dev) < 0))
- return;
-
- cdns_dsi_init_link(dsi);
- cdns_dsi_hs_init(dsi);
-}
-
static u32 *cdns_dsi_bridge_get_input_bus_fmts(struct drm_bridge *bridge,
struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
@@ -1048,9 +1056,7 @@ cdns_dsi_bridge_atomic_reset(struct drm_bridge *bridge)
static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = {
.attach = cdns_dsi_bridge_attach,
.mode_valid = cdns_dsi_bridge_mode_valid,
- .atomic_disable = cdns_dsi_bridge_atomic_disable,
.atomic_pre_enable = cdns_dsi_bridge_atomic_pre_enable,
- .atomic_enable = cdns_dsi_bridge_atomic_enable,
.atomic_post_disable = cdns_dsi_bridge_atomic_post_disable,
.atomic_check = cdns_dsi_bridge_atomic_check,
.atomic_reset = cdns_dsi_bridge_atomic_reset,
@@ -1289,9 +1295,10 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
int ret, irq;
u32 val;
- dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct cdns_dsi, input.bridge,
+ &cdns_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
platform_set_drvdata(pdev, dsi);
@@ -1349,7 +1356,6 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
* CDNS_DPI_INPUT.
*/
input->id = CDNS_DPI_INPUT;
- input->bridge.funcs = &cdns_dsi_bridge_funcs;
input->bridge.of_node = pdev->dev.of_node;
/* Mask all interrupts before registering the IRQ handler. */
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index b431e7efd1f0..cb5f5a8c539a 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -2389,9 +2389,10 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
int ret;
int irq;
- mhdp = devm_kzalloc(dev, sizeof(*mhdp), GFP_KERNEL);
- if (!mhdp)
- return -ENOMEM;
+ mhdp = devm_drm_bridge_alloc(dev, struct cdns_mhdp_device, bridge,
+ &cdns_mhdp_bridge_funcs);
+ if (IS_ERR(mhdp))
+ return PTR_ERR(mhdp);
clk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(clk)) {
@@ -2481,7 +2482,6 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
mhdp->display_fmt.bpc = 8;
mhdp->bridge.of_node = pdev->dev.of_node;
- mhdp->bridge.funcs = &cdns_mhdp_bridge_funcs;
mhdp->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD;
mhdp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c
index 634c5b030667..814713c5bea9 100644
--- a/drivers/gpu/drm/bridge/chipone-icn6211.c
+++ b/drivers/gpu/drm/bridge/chipone-icn6211.c
@@ -691,9 +691,10 @@ static int chipone_common_probe(struct device *dev, struct chipone **icnr)
struct chipone *icn;
int ret;
- icn = devm_kzalloc(dev, sizeof(struct chipone), GFP_KERNEL);
- if (!icn)
- return -ENOMEM;
+ icn = devm_drm_bridge_alloc(dev, struct chipone, bridge,
+ &chipone_bridge_funcs);
+ if (IS_ERR(icn))
+ return PTR_ERR(icn);
icn->dev = dev;
@@ -701,7 +702,6 @@ static int chipone_common_probe(struct device *dev, struct chipone **icnr)
if (ret)
return ret;
- icn->bridge.funcs = &chipone_bridge_funcs;
icn->bridge.type = DRM_MODE_CONNECTOR_DPI;
icn->bridge.of_node = dev->of_node;
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
index 210c45c1efd4..ab9274793356 100644
--- a/drivers/gpu/drm/bridge/chrontel-ch7033.c
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -536,9 +536,10 @@ static int ch7033_probe(struct i2c_client *client)
unsigned int val;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_bridge_alloc(dev, struct ch7033_priv, bridge,
+ &ch7033_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dev_set_drvdata(dev, priv);
@@ -575,7 +576,6 @@ static int ch7033_probe(struct i2c_client *client)
}
INIT_LIST_HEAD(&priv->bridge.list);
- priv->bridge.funcs = &ch7033_bridge_funcs;
priv->bridge.of_node = dev->of_node;
drm_bridge_add(&priv->bridge);
diff --git a/drivers/gpu/drm/bridge/cros-ec-anx7688.c b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
index c8abd9920fee..a35dae9b56e2 100644
--- a/drivers/gpu/drm/bridge/cros-ec-anx7688.c
+++ b/drivers/gpu/drm/bridge/cros-ec-anx7688.c
@@ -103,9 +103,10 @@ static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
u8 buffer[4];
int ret;
- anx7688 = devm_kzalloc(dev, sizeof(*anx7688), GFP_KERNEL);
- if (!anx7688)
- return -ENOMEM;
+ anx7688 = devm_drm_bridge_alloc(dev, struct cros_ec_anx7688, bridge,
+ &cros_ec_anx7688_bridge_funcs);
+ if (IS_ERR(anx7688))
+ return PTR_ERR(anx7688);
anx7688->client = client;
i2c_set_clientdata(client, anx7688);
@@ -153,7 +154,6 @@ static int cros_ec_anx7688_bridge_probe(struct i2c_client *client)
DRM_WARN("Old ANX7688 FW version (0x%04x), not filtering\n",
fw_version);
- anx7688->bridge.funcs = &cros_ec_anx7688_bridge_funcs;
drm_bridge_add(&anx7688->bridge);
return 0;
diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c
index 2cb6dfc7a6d3..5c3cf37200bc 100644
--- a/drivers/gpu/drm/bridge/fsl-ldb.c
+++ b/drivers/gpu/drm/bridge/fsl-ldb.c
@@ -298,16 +298,15 @@ static int fsl_ldb_probe(struct platform_device *pdev)
struct fsl_ldb *fsl_ldb;
int dual_link;
- fsl_ldb = devm_kzalloc(dev, sizeof(*fsl_ldb), GFP_KERNEL);
- if (!fsl_ldb)
- return -ENOMEM;
+ fsl_ldb = devm_drm_bridge_alloc(dev, struct fsl_ldb, bridge, &funcs);
+ if (IS_ERR(fsl_ldb))
+ return PTR_ERR(fsl_ldb);
fsl_ldb->devdata = of_device_get_match_data(dev);
if (!fsl_ldb->devdata)
return -EINVAL;
fsl_ldb->dev = &pdev->dev;
- fsl_ldb->bridge.funcs = &funcs;
fsl_ldb->bridge.of_node = dev->of_node;
fsl_ldb->clk = devm_clk_get(dev, "ldb");
diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
index f072c6ed39ef..0e31d5000e7c 100644
--- a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
+++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c
@@ -5,6 +5,8 @@
* bridge driver for legacy DT bindings, utilizing display-timings node
*/
+#include <linux/export.h>
+
#include <drm/drm_bridge.h>
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>
@@ -59,9 +61,10 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
struct imx_legacy_bridge *imx_bridge;
int ret;
- imx_bridge = devm_kzalloc(dev, sizeof(*imx_bridge), GFP_KERNEL);
- if (!imx_bridge)
- return ERR_PTR(-ENOMEM);
+ imx_bridge = devm_drm_bridge_alloc(dev, struct imx_legacy_bridge,
+ base, &imx_legacy_bridge_funcs);
+ if (IS_ERR(imx_bridge))
+ return ERR_CAST(imx_bridge);
ret = of_get_drm_display_mode(np,
&imx_bridge->mode,
@@ -72,7 +75,6 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
imx_bridge->mode.type |= DRM_MODE_TYPE_DRIVER;
- imx_bridge->base.funcs = &imx_legacy_bridge_funcs;
imx_bridge->base.of_node = np;
imx_bridge->base.ops = DRM_BRIDGE_OP_MODES;
imx_bridge->base.type = type;
diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
index 8a4fd7d77a8d..3a6f8587a257 100644
--- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c
@@ -140,9 +140,10 @@ static int imx8mp_hdmi_pvi_probe(struct platform_device *pdev)
struct device_node *remote;
struct imx8mp_hdmi_pvi *pvi;
- pvi = devm_kzalloc(&pdev->dev, sizeof(*pvi), GFP_KERNEL);
- if (!pvi)
- return -ENOMEM;
+ pvi = devm_drm_bridge_alloc(&pdev->dev, struct imx8mp_hdmi_pvi,
+ bridge, &imx_hdmi_pvi_bridge_funcs);
+ if (IS_ERR(pvi))
+ return PTR_ERR(pvi);
platform_set_drvdata(pdev, pvi);
pvi->dev = &pdev->dev;
@@ -166,7 +167,6 @@ static int imx8mp_hdmi_pvi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
/* Register the bridge. */
- pvi->bridge.funcs = &imx_hdmi_pvi_bridge_funcs;
pvi->bridge.of_node = pdev->dev.of_node;
pvi->bridge.timings = pvi->next_bridge->timings;
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
index 1f6fd488e703..8517b1c953d4 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c
@@ -63,12 +63,11 @@ struct imx8qxp_pc_channel {
struct drm_bridge *next_bridge;
struct imx8qxp_pc *pc;
unsigned int stream_id;
- bool is_available;
};
struct imx8qxp_pc {
struct device *dev;
- struct imx8qxp_pc_channel ch[2];
+ struct imx8qxp_pc_channel *ch[2];
struct clk *clk_apb;
void __iomem *base;
};
@@ -307,7 +306,14 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
goto free_child;
}
- ch = &pc->ch[i];
+ ch = devm_drm_bridge_alloc(dev, struct imx8qxp_pc_channel, bridge,
+ &imx8qxp_pc_bridge_funcs);
+ if (IS_ERR(ch)) {
+ ret = PTR_ERR(ch);
+ goto free_child;
+ }
+
+ pc->ch[i] = ch;
ch->pc = pc;
ch->stream_id = i;
@@ -333,9 +339,7 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
of_node_put(remote);
ch->bridge.driver_private = ch;
- ch->bridge.funcs = &imx8qxp_pc_bridge_funcs;
ch->bridge.of_node = child;
- ch->is_available = true;
drm_bridge_add(&ch->bridge);
}
@@ -345,8 +349,8 @@ static int imx8qxp_pc_bridge_probe(struct platform_device *pdev)
free_child:
of_node_put(child);
- if (i == 1 && pc->ch[0].next_bridge)
- drm_bridge_remove(&pc->ch[0].bridge);
+ if (i == 1 && pc->ch[0]->next_bridge)
+ drm_bridge_remove(&pc->ch[0]->bridge);
pm_runtime_disable(dev);
return ret;
@@ -359,13 +363,10 @@ static void imx8qxp_pc_bridge_remove(struct platform_device *pdev)
int i;
for (i = 0; i < 2; i++) {
- ch = &pc->ch[i];
-
- if (!ch->is_available)
- continue;
+ ch = pc->ch[i];
- drm_bridge_remove(&ch->bridge);
- ch->is_available = false;
+ if (ch)
+ drm_bridge_remove(&ch->bridge);
}
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
index e092c9ea99b0..e5943506981d 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c
@@ -327,9 +327,10 @@ static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
int ret;
- pl = devm_kzalloc(dev, sizeof(*pl), GFP_KERNEL);
- if (!pl)
- return -ENOMEM;
+ pl = devm_drm_bridge_alloc(dev, struct imx8qxp_pixel_link, bridge,
+ &imx8qxp_pixel_link_bridge_funcs);
+ if (IS_ERR(pl))
+ return PTR_ERR(pl);
ret = imx_scu_get_handle(&pl->ipc_handle);
if (ret) {
@@ -384,7 +385,6 @@ static int imx8qxp_pixel_link_bridge_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pl);
pl->bridge.driver_private = pl;
- pl->bridge.funcs = &imx8qxp_pixel_link_bridge_funcs;
pl->bridge.of_node = np;
drm_bridge_add(&pl->bridge);
diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
index da138ab51b3b..111310acab2c 100644
--- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
+++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c
@@ -392,9 +392,10 @@ static int imx8qxp_pxl2dpi_bridge_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
int ret;
- p2d = devm_kzalloc(dev, sizeof(*p2d), GFP_KERNEL);
- if (!p2d)
- return -ENOMEM;
+ p2d = devm_drm_bridge_alloc(dev, struct imx8qxp_pxl2dpi, bridge,
+ &imx8qxp_pxl2dpi_bridge_funcs);
+ if (IS_ERR(p2d))
+ return PTR_ERR(p2d);
p2d->regmap = syscon_node_to_regmap(np->parent);
if (IS_ERR(p2d->regmap)) {
@@ -441,7 +442,6 @@ static int imx8qxp_pxl2dpi_bridge_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
p2d->bridge.driver_private = p2d;
- p2d->bridge.funcs = &imx8qxp_pxl2dpi_bridge_funcs;
p2d->bridge.of_node = np;
drm_bridge_add(&p2d->bridge);
diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c
index a3a63a977b0a..c4eedf643f39 100644
--- a/drivers/gpu/drm/bridge/ite-it6263.c
+++ b/drivers/gpu/drm/bridge/ite-it6263.c
@@ -816,9 +816,10 @@ static int it6263_probe(struct i2c_client *client)
struct it6263 *it;
int ret;
- it = devm_kzalloc(dev, sizeof(*it), GFP_KERNEL);
- if (!it)
- return -ENOMEM;
+ it = devm_drm_bridge_alloc(dev, struct it6263, bridge,
+ &it6263_bridge_funcs);
+ if (IS_ERR(it))
+ return PTR_ERR(it);
it->dev = dev;
it->hdmi_i2c = client;
@@ -866,7 +867,6 @@ static int it6263_probe(struct i2c_client *client)
i2c_set_clientdata(client, it);
- it->bridge.funcs = &it6263_bridge_funcs;
it->bridge.of_node = dev->of_node;
/* IT6263 chip doesn't support HPD interrupt. */
it->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c
index 1383d1e21afe..b0dc9280d870 100644
--- a/drivers/gpu/drm/bridge/ite-it6505.c
+++ b/drivers/gpu/drm/bridge/ite-it6505.c
@@ -3583,9 +3583,10 @@ static int it6505_i2c_probe(struct i2c_client *client)
struct extcon_dev *extcon;
int err;
- it6505 = devm_kzalloc(&client->dev, sizeof(*it6505), GFP_KERNEL);
- if (!it6505)
- return -ENOMEM;
+ it6505 = devm_drm_bridge_alloc(&client->dev, struct it6505, bridge,
+ &it6505_bridge_funcs);
+ if (IS_ERR(it6505))
+ return PTR_ERR(it6505);
mutex_init(&it6505->extcon_lock);
mutex_init(&it6505->mode_lock);
@@ -3660,7 +3661,6 @@ static int it6505_i2c_probe(struct i2c_client *client)
it6505->aux.transfer = it6505_aux_transfer;
drm_dp_aux_init(&it6505->aux);
- it6505->bridge.funcs = &it6505_bridge_funcs;
it6505->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
it6505->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD;
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 7b110ae53291..6494f0842793 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -1516,9 +1516,10 @@ static int it66121_probe(struct i2c_client *client)
return -ENXIO;
}
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct it66121_ctx, bridge,
+ &it66121_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
if (!ep)
@@ -1577,7 +1578,6 @@ static int it66121_probe(struct i2c_client *client)
return -ENODEV;
}
- ctx->bridge.funcs = &it66121_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 3e49d855b364..bd83228b0f0e 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -761,9 +761,10 @@ static int lt8912_probe(struct i2c_client *client)
int ret = 0;
struct device *dev = &client->dev;
- lt = devm_kzalloc(dev, sizeof(struct lt8912), GFP_KERNEL);
- if (!lt)
- return -ENOMEM;
+ lt = devm_drm_bridge_alloc(dev, struct lt8912, bridge,
+ &lt8912_bridge_funcs);
+ if (IS_ERR(lt))
+ return PTR_ERR(lt);
lt->dev = dev;
lt->i2c_client[0] = client;
@@ -778,7 +779,6 @@ static int lt8912_probe(struct i2c_client *client)
i2c_set_clientdata(client, lt);
- lt->bridge.funcs = &lt8912_bridge_funcs;
lt->bridge.of_node = dev->of_node;
lt->bridge.ops = (DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_DETECT);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c
index 9b2dac9bd63c..399fa7eebd49 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9211.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9211.c
@@ -727,9 +727,9 @@ static int lt9211_probe(struct i2c_client *client)
struct lt9211 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct lt9211, bridge, &lt9211_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
@@ -755,7 +755,6 @@ static int lt9211_probe(struct i2c_client *client)
dev_set_drvdata(dev, ctx);
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &lt9211_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c
index a35a8b8ca89c..d6ee79c1e427 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611.c
@@ -1072,9 +1072,10 @@ static int lt9611_probe(struct i2c_client *client)
return -ENODEV;
}
- lt9611 = devm_kzalloc(dev, sizeof(*lt9611), GFP_KERNEL);
- if (!lt9611)
- return -ENOMEM;
+ lt9611 = devm_drm_bridge_alloc(dev, struct lt9611, bridge,
+ &lt9611_bridge_funcs);
+ if (IS_ERR(lt9611))
+ return PTR_ERR(lt9611);
lt9611->dev = dev;
lt9611->client = client;
@@ -1127,7 +1128,6 @@ static int lt9611_probe(struct i2c_client *client)
/* Disable Audio InfoFrame, enabled by default */
regmap_update_bits(lt9611->regmap, 0x843d, LT9611_INFOFRAME_AUDIO, 0);
- lt9611->bridge.funcs = &lt9611_bridge_funcs;
lt9611->bridge.of_node = client->dev.of_node;
lt9611->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HPD | DRM_BRIDGE_OP_MODES |
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 1646e454e0b0..e6a7147e141b 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -118,9 +118,10 @@ static int lvds_codec_probe(struct platform_device *pdev)
u32 val;
int ret;
- lvds_codec = devm_kzalloc(dev, sizeof(*lvds_codec), GFP_KERNEL);
- if (!lvds_codec)
- return -ENOMEM;
+ lvds_codec = devm_drm_bridge_alloc(dev, struct lvds_codec, bridge,
+ &funcs);
+ if (IS_ERR(lvds_codec))
+ return PTR_ERR(lvds_codec);
lvds_codec->dev = &pdev->dev;
lvds_codec->connector_type = (uintptr_t)of_device_get_match_data(dev);
@@ -156,8 +157,6 @@ static int lvds_codec_probe(struct platform_device *pdev)
if (IS_ERR(lvds_codec->panel_bridge))
return PTR_ERR(lvds_codec->panel_bridge);
- lvds_codec->bridge.funcs = &funcs;
-
/*
* Decoder input LVDS format is a property of the decoder chip or even
* its strapping. Handle data-mapping the same way lvds-panel does. In
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index 15a5a1f644fc..81dde9ed7bcf 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -225,13 +225,11 @@ static int ge_b850v3_lvds_init(struct device *dev)
if (ge_b850v3_lvds_ptr)
goto success;
- ge_b850v3_lvds_ptr = devm_kzalloc(dev,
- sizeof(*ge_b850v3_lvds_ptr),
- GFP_KERNEL);
-
- if (!ge_b850v3_lvds_ptr) {
+ ge_b850v3_lvds_ptr = devm_drm_bridge_alloc(dev, struct ge_b850v3_lvds, bridge,
+ &ge_b850v3_lvds_funcs);
+ if (IS_ERR(ge_b850v3_lvds_ptr)) {
mutex_unlock(&ge_b850v3_lvds_dev_mutex);
- return -ENOMEM;
+ return PTR_ERR(ge_b850v3_lvds_ptr);
}
success:
@@ -264,7 +262,6 @@ static int ge_b850v3_register(void)
struct device *dev = &stdp4028_i2c->dev;
/* drm bridge initialization */
- ge_b850v3_lvds_ptr->bridge.funcs = &ge_b850v3_lvds_funcs;
ge_b850v3_lvds_ptr->bridge.ops = DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID;
ge_b850v3_lvds_ptr->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;
diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c
index 1d4ae0097df8..9f4ff82bc6b4 100644
--- a/drivers/gpu/drm/bridge/microchip-lvds.c
+++ b/drivers/gpu/drm/bridge/microchip-lvds.c
@@ -157,9 +157,10 @@ static int mchp_lvds_probe(struct platform_device *pdev)
if (!dev->of_node)
return -ENODEV;
- lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (!lvds)
- return -ENOMEM;
+ lvds = devm_drm_bridge_alloc(&pdev->dev, struct mchp_lvds, bridge,
+ &mchp_lvds_bridge_funcs);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
lvds->dev = dev;
@@ -192,7 +193,6 @@ static int mchp_lvds_probe(struct platform_device *pdev)
lvds->bridge.of_node = dev->of_node;
lvds->bridge.type = DRM_MODE_CONNECTOR_LVDS;
- lvds->bridge.funcs = &mchp_lvds_bridge_funcs;
dev_set_drvdata(dev, lvds);
ret = devm_pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 55912ae11f46..2f7429b24fc2 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -1149,9 +1149,10 @@ static int nwl_dsi_probe(struct platform_device *pdev)
struct nwl_dsi *dsi;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct nwl_dsi, bridge,
+ &nwl_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dsi->dev = dev;
@@ -1180,7 +1181,6 @@ static int nwl_dsi_probe(struct platform_device *pdev)
dsi->quirks = (uintptr_t)attr->data;
dsi->bridge.driver_private = dsi;
- dsi->bridge.funcs = &nwl_dsi_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.timings = &nwl_dsi_timings;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 25d7c415478b..7acb11f16dc1 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -261,10 +261,10 @@ static int ptn3460_probe(struct i2c_client *client)
struct drm_bridge *panel_bridge;
int ret;
- ptn_bridge = devm_kzalloc(dev, sizeof(*ptn_bridge), GFP_KERNEL);
- if (!ptn_bridge) {
- return -ENOMEM;
- }
+ ptn_bridge = devm_drm_bridge_alloc(dev, struct ptn3460_bridge, bridge,
+ &ptn3460_bridge_funcs);
+ if (IS_ERR(ptn_bridge))
+ return PTR_ERR(ptn_bridge);
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(panel_bridge))
@@ -300,7 +300,6 @@ static int ptn3460_probe(struct i2c_client *client)
return ret;
}
- ptn_bridge->bridge.funcs = &ptn3460_bridge_funcs;
ptn_bridge->bridge.ops = DRM_BRIDGE_OP_EDID;
ptn_bridge->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ptn_bridge->bridge.of_node = dev->of_node;
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 79b009ab9396..6361a943e213 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -5,6 +5,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -287,15 +288,14 @@ struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
if (!panel)
return ERR_PTR(-EINVAL);
- panel_bridge = devm_kzalloc(panel->dev, sizeof(*panel_bridge),
- GFP_KERNEL);
- if (!panel_bridge)
- return ERR_PTR(-ENOMEM);
+ panel_bridge = devm_drm_bridge_alloc(panel->dev, struct panel_bridge, bridge,
+ &panel_bridge_bridge_funcs);
+ if (IS_ERR(panel_bridge))
+ return (void *)panel_bridge;
panel_bridge->connector_type = connector_type;
panel_bridge->panel = panel;
- panel_bridge->bridge.funcs = &panel_bridge_bridge_funcs;
panel_bridge->bridge.of_node = panel->dev->of_node;
panel_bridge->bridge.ops = DRM_BRIDGE_OP_MODES;
panel_bridge->bridge.type = connector_type;
@@ -327,7 +327,8 @@ void drm_panel_bridge_remove(struct drm_bridge *bridge)
panel_bridge = drm_bridge_to_panel_bridge(bridge);
drm_bridge_remove(bridge);
- devm_kfree(panel_bridge->panel->dev, bridge);
+ /* TODO remove this after reworking panel_bridge lifetime */
+ devm_drm_put_bridge(panel_bridge->panel->dev, bridge);
}
EXPORT_SYMBOL(drm_panel_bridge_remove);
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 8726fefc5c65..f879a1df077d 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -449,9 +449,10 @@ static int ps8622_probe(struct i2c_client *client)
struct drm_bridge *panel_bridge;
int ret;
- ps8622 = devm_kzalloc(dev, sizeof(*ps8622), GFP_KERNEL);
- if (!ps8622)
- return -ENOMEM;
+ ps8622 = devm_drm_bridge_alloc(dev, struct ps8622_bridge, bridge,
+ &ps8622_bridge_funcs);
+ if (IS_ERR(ps8622))
+ return PTR_ERR(ps8622);
panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
if (IS_ERR(panel_bridge))
@@ -509,7 +510,6 @@ static int ps8622_probe(struct i2c_client *client)
ps8622->bl->props.brightness = PS8622_MAX_BRIGHTNESS;
}
- ps8622->bridge.funcs = &ps8622_bridge_funcs;
ps8622->bridge.type = DRM_MODE_CONNECTOR_LVDS;
ps8622->bridge.of_node = dev->of_node;
drm_bridge_add(&ps8622->bridge);
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 2422ff68c104..825777a5758f 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -636,9 +636,10 @@ static int ps8640_probe(struct i2c_client *client)
int ret;
u32 i;
- ps_bridge = devm_kzalloc(dev, sizeof(*ps_bridge), GFP_KERNEL);
- if (!ps_bridge)
- return -ENOMEM;
+ ps_bridge = devm_drm_bridge_alloc(dev, struct ps8640, bridge,
+ &ps8640_bridge_funcs);
+ if (IS_ERR(ps_bridge))
+ return PTR_ERR(ps_bridge);
mutex_init(&ps_bridge->aux_lock);
@@ -662,7 +663,6 @@ static int ps8640_probe(struct i2c_client *client)
if (IS_ERR(ps_bridge->gpio_reset))
return PTR_ERR(ps_bridge->gpio_reset);
- ps_bridge->bridge.funcs = &ps8640_bridge_funcs;
ps_bridge->bridge.of_node = dev->of_node;
ps_bridge->bridge.type = DRM_MODE_CONNECTOR_eDP;
diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c
index 0014c497e3fe..f2f666b27d2d 100644
--- a/drivers/gpu/drm/bridge/samsung-dsim.c
+++ b/drivers/gpu/drm/bridge/samsung-dsim.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/irq.h>
#include <linux/media-bus-format.h>
#include <linux/of.h>
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 6de61d9fe064..882973e90062 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -1135,7 +1135,6 @@ static int sii902x_init(struct sii902x *sii902x)
if (ret)
goto err_unreg_audio;
- sii902x->bridge.funcs = &sii902x_bridge_funcs;
sii902x->bridge.of_node = dev->of_node;
sii902x->bridge.timings = &default_sii902x_timings;
sii902x->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
@@ -1170,9 +1169,9 @@ static int sii902x_probe(struct i2c_client *client)
return -EIO;
}
- sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL);
- if (!sii902x)
- return -ENOMEM;
+ sii902x = devm_drm_bridge_alloc(dev, struct sii902x, bridge, &sii902x_bridge_funcs);
+ if (IS_ERR(sii902x))
+ return PTR_ERR(sii902x);
sii902x->i2c = client;
sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config);
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index cd7837c9a6e0..bb1bed03eb5b 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -888,9 +888,10 @@ static int sii9234_probe(struct i2c_client *client)
struct device *dev = &client->dev;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sii9234, bridge,
+ &sii9234_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
mutex_init(&ctx->lock);
@@ -921,7 +922,6 @@ static int sii9234_probe(struct i2c_client *client)
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sii9234_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 3af650dc92a1..9e48ad39e1cc 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2291,9 +2291,10 @@ static int sii8620_probe(struct i2c_client *client)
struct sii8620 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct sii8620, bridge,
+ &sii8620_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
mutex_init(&ctx->lock);
@@ -2336,7 +2337,6 @@ static int sii8620_probe(struct i2c_client *client)
i2c_set_clientdata(client, ctx);
- ctx->bridge.funcs = &sii8620_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
index 70db5b99e5bb..c66bd913e33a 100644
--- a/drivers/gpu/drm/bridge/simple-bridge.c
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -168,9 +168,10 @@ static int simple_bridge_probe(struct platform_device *pdev)
struct simple_bridge *sbridge;
struct device_node *remote;
- sbridge = devm_kzalloc(&pdev->dev, sizeof(*sbridge), GFP_KERNEL);
- if (!sbridge)
- return -ENOMEM;
+ sbridge = devm_drm_bridge_alloc(&pdev->dev, struct simple_bridge,
+ bridge, &simple_bridge_bridge_funcs);
+ if (IS_ERR(sbridge))
+ return PTR_ERR(sbridge);
sbridge->info = of_device_get_match_data(&pdev->dev);
@@ -204,7 +205,6 @@ static int simple_bridge_probe(struct platform_device *pdev)
"Unable to retrieve enable GPIO\n");
/* Register the bridge. */
- sbridge->bridge.funcs = &simple_bridge_bridge_funcs;
sbridge->bridge.of_node = pdev->dev.of_node;
sbridge->bridge.timings = sbridge->info->timings;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
index 5e5f8c2f95be..7ade80f02a94 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c
@@ -8,6 +8,7 @@
*/
#include <linux/completion.h>
#include <linux/hdmi.h>
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/module.h>
@@ -1045,9 +1046,10 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
return ERR_PTR(-ENODEV);
}
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return ERR_PTR(-ENOMEM);
+ hdmi = devm_drm_bridge_alloc(dev, struct dw_hdmi_qp, bridge,
+ &dw_hdmi_qp_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return ERR_CAST(hdmi);
hdmi->dev = dev;
@@ -1073,7 +1075,6 @@ struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev,
return ERR_PTR(ret);
hdmi->bridge.driver_private = hdmi;
- hdmi->bridge.funcs = &dw_hdmi_qp_bridge_funcs;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT |
DRM_BRIDGE_OP_EDID |
DRM_BRIDGE_OP_HDMI |
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 8791408dd1ff..76c6570e2a85 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/irq.h>
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index b08ada920a50..8fc2e282ff11 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -11,6 +11,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/media-bus-format.h>
@@ -1194,9 +1195,10 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
struct dw_mipi_dsi *dsi;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return ERR_PTR(-ENOMEM);
+ dsi = devm_drm_bridge_alloc(dev, struct dw_mipi_dsi, bridge,
+ &dw_mipi_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return ERR_CAST(dsi);
dsi->dev = dev;
dsi->plat_data = plat_data;
@@ -1265,7 +1267,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev,
}
dsi->bridge.driver_private = dsi;
- dsi->bridge.funcs = &dw_mipi_dsi_bridge_funcs;
dsi->bridge.of_node = pdev->dev.of_node;
return dsi;
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
index c76f5f2e74d1..5926a3a05d79 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c
@@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/export.h>
#include <linux/iopoll.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
@@ -914,9 +915,10 @@ __dw_mipi_dsi2_probe(struct platform_device *pdev,
struct dw_mipi_dsi2 *dsi2;
int ret;
- dsi2 = devm_kzalloc(dev, sizeof(*dsi2), GFP_KERNEL);
- if (!dsi2)
- return ERR_PTR(-ENOMEM);
+ dsi2 = devm_drm_bridge_alloc(dev, struct dw_mipi_dsi2, bridge,
+ &dw_mipi_dsi2_bridge_funcs);
+ if (IS_ERR(dsi2))
+ return ERR_CAST(dsi2);
dsi2->dev = dev;
dsi2->plat_data = plat_data;
@@ -981,7 +983,6 @@ __dw_mipi_dsi2_probe(struct platform_device *pdev,
}
dsi2->bridge.driver_private = dsi2;
- dsi2->bridge.funcs = &dw_mipi_dsi2_bridge_funcs;
dsi2->bridge.of_node = pdev->dev.of_node;
return dsi2;
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index edf01476f2ef..98df3e667d4a 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -265,9 +265,10 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
struct tc358762 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct tc358762), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct tc358762, bridge,
+ &tc358762_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -288,7 +289,6 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- ctx->bridge.funcs = &tc358762_bridge_funcs;
ctx->bridge.type = DRM_MODE_CONNECTOR_DPI;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 3f76c890fad9..084e9d898e22 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -347,9 +347,10 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
struct tc358764 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct tc358764), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_bridge_alloc(dev, struct tc358764, bridge,
+ &tc358764_bridge_funcs);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -368,7 +369,6 @@ static int tc358764_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return ret;
- ctx->bridge.funcs = &tc358764_bridge_funcs;
ctx->bridge.of_node = dev->of_node;
ctx->bridge.pre_enable_prev_first = true;
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index 063f217a17b6..fbdc44e16229 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -1287,9 +1287,10 @@ static int tc358768_i2c_probe(struct i2c_client *client)
if (!np)
return -ENODEV;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_bridge_alloc(dev, struct tc358768_priv, bridge,
+ &tc358768_bridge_funcs);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dev_set_drvdata(dev, priv);
priv->dev = dev;
@@ -1321,7 +1322,6 @@ static int tc358768_i2c_probe(struct i2c_client *client)
priv->dsi_host.dev = dev;
priv->dsi_host.ops = &tc358768_dsi_host_ops;
- priv->bridge.funcs = &tc358768_bridge_funcs;
priv->bridge.timings = &default_tc358768_timings;
priv->bridge.of_node = np;
diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c
index 1b10e6ee1724..366b12db0e7c 100644
--- a/drivers/gpu/drm/bridge/tc358775.c
+++ b/drivers/gpu/drm/bridge/tc358775.c
@@ -659,9 +659,10 @@ static int tc_probe(struct i2c_client *client)
struct tc_data *tc;
int ret;
- tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
- if (!tc)
- return -ENOMEM;
+ tc = devm_drm_bridge_alloc(dev, struct tc_data, bridge,
+ &tc_bridge_funcs);
+ if (IS_ERR(tc))
+ return PTR_ERR(tc);
tc->dev = dev;
tc->i2c = client;
@@ -701,7 +702,6 @@ static int tc_probe(struct i2c_client *client)
return ret;
}
- tc->bridge.funcs = &tc_bridge_funcs;
tc->bridge.of_node = dev->of_node;
tc->bridge.pre_enable_prev_first = true;
drm_bridge_add(&tc->bridge);
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index e2fc78adebcf..2cb7cd0c0608 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -181,9 +181,10 @@ static int thc63_probe(struct platform_device *pdev)
struct thc63_dev *thc63;
int ret;
- thc63 = devm_kzalloc(&pdev->dev, sizeof(*thc63), GFP_KERNEL);
- if (!thc63)
- return -ENOMEM;
+ thc63 = devm_drm_bridge_alloc(&pdev->dev, struct thc63_dev, bridge,
+ &thc63_bridge_func);
+ if (IS_ERR(thc63))
+ return PTR_ERR(thc63);
thc63->dev = &pdev->dev;
platform_set_drvdata(pdev, thc63);
@@ -208,7 +209,6 @@ static int thc63_probe(struct platform_device *pdev)
thc63->bridge.driver_private = thc63;
thc63->bridge.of_node = pdev->dev.of_node;
- thc63->bridge.funcs = &thc63_bridge_func;
thc63->bridge.timings = &thc63->timings;
drm_bridge_add(&thc63->bridge);
diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c
index 47638d1c96ec..b07f7c9d5890 100644
--- a/drivers/gpu/drm/bridge/ti-dlpc3433.c
+++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c
@@ -348,9 +348,10 @@ static int dlpc3433_probe(struct i2c_client *client)
struct dlpc *dlpc;
int ret;
- dlpc = devm_kzalloc(dev, sizeof(*dlpc), GFP_KERNEL);
- if (!dlpc)
- return -ENOMEM;
+ dlpc = devm_drm_bridge_alloc(dev, struct dlpc, bridge,
+ &dlpc_bridge_funcs);
+ if (IS_ERR(dlpc))
+ return PTR_ERR(dlpc);
dlpc->dev = dev;
@@ -365,7 +366,6 @@ static int dlpc3433_probe(struct i2c_client *client)
dev_set_drvdata(dev, dlpc);
i2c_set_clientdata(client, dlpc);
- dlpc->bridge.funcs = &dlpc_bridge_funcs;
dlpc->bridge.of_node = dev->of_node;
drm_bridge_add(&dlpc->bridge);
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 60224f476e1d..78a50b947a08 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -447,23 +447,6 @@ DEFINE_SHOW_ATTRIBUTE(status);
* Auxiliary Devices (*not* AUX)
*/
-static void ti_sn65dsi86_uninit_aux(void *data)
-{
- auxiliary_device_uninit(data);
-}
-
-static void ti_sn65dsi86_delete_aux(void *data)
-{
- auxiliary_device_delete(data);
-}
-
-static void ti_sn65dsi86_aux_device_release(struct device *dev)
-{
- struct auxiliary_device *aux = container_of(dev, struct auxiliary_device, dev);
-
- kfree(aux);
-}
-
static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
struct auxiliary_device **aux_out,
const char *name)
@@ -471,34 +454,16 @@ static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
struct device *dev = pdata->dev;
const struct i2c_client *client = to_i2c_client(dev);
struct auxiliary_device *aux;
- int ret;
+ int id;
- aux = kzalloc(sizeof(*aux), GFP_KERNEL);
+ id = (client->adapter->nr << 10) | client->addr;
+ aux = __devm_auxiliary_device_create(dev, KBUILD_MODNAME, name,
+ NULL, id);
if (!aux)
- return -ENOMEM;
-
- aux->name = name;
- aux->id = (client->adapter->nr << 10) | client->addr;
- aux->dev.parent = dev;
- aux->dev.release = ti_sn65dsi86_aux_device_release;
- device_set_of_node_from_dev(&aux->dev, dev);
- ret = auxiliary_device_init(aux);
- if (ret) {
- kfree(aux);
- return ret;
- }
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
- if (ret)
- return ret;
-
- ret = auxiliary_device_add(aux);
- if (ret)
- return ret;
- ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
- if (!ret)
- *aux_out = aux;
+ return -ENODEV;
- return ret;
+ *aux_out = aux;
+ return 0;
}
/* -----------------------------------------------------------------------------
@@ -1707,24 +1672,20 @@ static int ti_sn_bridge_gpio_get(struct gpio_chip *chip, unsigned int offset)
return !!(val & BIT(SN_GPIO_INPUT_SHIFT + offset));
}
-static void ti_sn_bridge_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int val)
+static int ti_sn_bridge_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
{
struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip);
- int ret;
if (!test_bit(offset, pdata->gchip_output)) {
dev_err(pdata->dev, "Ignoring GPIO set while input\n");
- return;
+ return -EPERM;
}
val &= 1;
- ret = regmap_update_bits(pdata->regmap, SN_GPIO_IO_REG,
- BIT(SN_GPIO_OUTPUT_SHIFT + offset),
- val << (SN_GPIO_OUTPUT_SHIFT + offset));
- if (ret)
- dev_warn(pdata->dev,
- "Failed to set bridge GPIO %u: %d\n", offset, ret);
+ return regmap_update_bits(pdata->regmap, SN_GPIO_IO_REG,
+ BIT(SN_GPIO_OUTPUT_SHIFT + offset),
+ val << (SN_GPIO_OUTPUT_SHIFT + offset));
}
static int ti_sn_bridge_gpio_direction_input(struct gpio_chip *chip,
@@ -1828,7 +1789,7 @@ static int ti_sn_gpio_probe(struct auxiliary_device *adev,
pdata->gchip.direction_input = ti_sn_bridge_gpio_direction_input;
pdata->gchip.direction_output = ti_sn_bridge_gpio_direction_output;
pdata->gchip.get = ti_sn_bridge_gpio_get;
- pdata->gchip.set = ti_sn_bridge_gpio_set;
+ pdata->gchip.set_rv = ti_sn_bridge_gpio_set;
pdata->gchip.can_sleep = true;
pdata->gchip.names = ti_sn_bridge_gpio_names;
pdata->gchip.ngpio = SN_NUM_GPIOS;
diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c
index cca75443f012..27053d020df7 100644
--- a/drivers/gpu/drm/bridge/ti-tdp158.c
+++ b/drivers/gpu/drm/bridge/ti-tdp158.c
@@ -68,9 +68,10 @@ static int tdp158_probe(struct i2c_client *client)
struct tdp158 *tdp158;
struct device *dev = &client->dev;
- tdp158 = devm_kzalloc(dev, sizeof(*tdp158), GFP_KERNEL);
- if (!tdp158)
- return -ENOMEM;
+ tdp158 = devm_drm_bridge_alloc(dev, struct tdp158, bridge,
+ &tdp158_bridge_funcs);
+ if (IS_ERR(tdp158))
+ return PTR_ERR(tdp158);
tdp158->next = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
if (IS_ERR(tdp158->next))
@@ -89,7 +90,6 @@ static int tdp158_probe(struct i2c_client *client)
return dev_err_probe(dev, PTR_ERR(tdp158->enable), "enable");
tdp158->bridge.of_node = dev->of_node;
- tdp158->bridge.funcs = &tdp158_bridge_funcs;
tdp158->bridge.driver_private = tdp158;
tdp158->dev = dev;
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index e15d232ddbac..549e8e8edeb4 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -341,14 +341,14 @@ static int tfp410_init(struct device *dev, bool i2c)
return -ENXIO;
}
- dvi = devm_kzalloc(dev, sizeof(*dvi), GFP_KERNEL);
- if (!dvi)
- return -ENOMEM;
+ dvi = devm_drm_bridge_alloc(dev, struct tfp410, bridge,
+ &tfp410_bridge_funcs);
+ if (IS_ERR(dvi))
+ return PTR_ERR(dvi);
dvi->dev = dev;
dev_set_drvdata(dev, dvi);
- dvi->bridge.funcs = &tfp410_bridge_funcs;
dvi->bridge.of_node = dev->of_node;
dvi->bridge.timings = &dvi->timings;
dvi->bridge.type = DRM_MODE_CONNECTOR_DVID;
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
index 1c289051a598..0919364e80d1 100644
--- a/drivers/gpu/drm/bridge/ti-tpd12s015.c
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -116,13 +116,13 @@ static int tpd12s015_probe(struct platform_device *pdev)
struct gpio_desc *gpio;
int ret;
- tpd = devm_kzalloc(&pdev->dev, sizeof(*tpd), GFP_KERNEL);
- if (!tpd)
- return -ENOMEM;
+ tpd = devm_drm_bridge_alloc(&pdev->dev, struct tpd12s015_device,
+ bridge, &tpd12s015_bridge_funcs);
+ if (IS_ERR(tpd))
+ return PTR_ERR(tpd);
platform_set_drvdata(pdev, tpd);
- tpd->bridge.funcs = &tpd12s015_bridge_funcs;
tpd->bridge.of_node = pdev->dev.of_node;
tpd->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
tpd->bridge.ops = DRM_BRIDGE_OP_DETECT;
diff --git a/drivers/gpu/drm/clients/drm_client_setup.c b/drivers/gpu/drm/clients/drm_client_setup.c
index e17265039ca8..aec2fab6d2bf 100644
--- a/drivers/gpu/drm/clients/drm_client_setup.c
+++ b/drivers/gpu/drm/clients/drm_client_setup.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
+
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_device.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index 8d22b7627d41..df09cf9a8ca1 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -8,6 +8,7 @@ config DRM_DISPLAY_DP_AUX_BUS
config DRM_DISPLAY_HELPER
tristate
depends on DRM
+ select CEC_CORE if DRM_DISPLAY_DP_AUX_CEC || DRM_DISPLAY_HDMI_CEC_HELPER || CEC_NOTIFIER
help
DRM helpers for display adapters.
@@ -16,6 +17,7 @@ if DRM_DISPLAY_HELPER
config DRM_BRIDGE_CONNECTOR
bool
select DRM_DISPLAY_HDMI_AUDIO_HELPER
+ select DRM_DISPLAY_HDMI_CEC_HELPER
select DRM_DISPLAY_HDMI_STATE_HELPER
help
DRM connector implementation terminating DRM bridge chains.
@@ -23,7 +25,6 @@ config DRM_BRIDGE_CONNECTOR
config DRM_DISPLAY_DP_AUX_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
select DRM_DISPLAY_DP_HELPER
- select CEC_CORE
help
Choose this option if you want to enable HDMI CEC support for
DisplayPort/USB-C to HDMI adapters.
@@ -82,6 +83,16 @@ config DRM_DISPLAY_HDMI_AUDIO_HELPER
DRM display helpers for HDMI Audio functionality (generic HDMI Codec
implementation).
+config DRM_DISPLAY_HDMI_CEC_HELPER
+ bool
+ help
+ DRM display helpers for HDMI CEC implementation.
+
+config DRM_DISPLAY_HDMI_CEC_NOTIFIER_HELPER
+ def_bool CEC_NOTIFIER
+ help
+ DRM display helpers for the HDMI CEC notifier implementation.
+
config DRM_DISPLAY_HDMI_HELPER
bool
help
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index b17879b957d5..0ff4a1ad0222 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -16,6 +16,10 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DSC_HELPER) += \
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_AUDIO_HELPER) += \
drm_hdmi_audio_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_CEC_HELPER) += \
+ drm_hdmi_cec_helper.o
+drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_CEC_NOTIFIER_HELPER) += \
+ drm_hdmi_cec_notifier_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \
drm_scdc_helper.o
diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index 7d2e499ea5de..6cdb432dbc30 100644
--- a/drivers/gpu/drm/display/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -3,6 +3,7 @@
* Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -20,6 +21,7 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
@@ -113,6 +115,13 @@ struct drm_bridge_connector {
* &DRM_BRIDGE_OP_DP_AUDIO).
*/
struct drm_bridge *bridge_dp_audio;
+ /**
+ * @bridge_hdmi_cec:
+ *
+ * The bridge in the chain that implements CEC support, if any (see
+ * DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER).
+ */
+ struct drm_bridge *bridge_hdmi_cec;
};
#define to_drm_bridge_connector(x) \
@@ -546,6 +555,65 @@ static const struct drm_connector_hdmi_audio_funcs drm_bridge_connector_hdmi_aud
.mute_stream = drm_bridge_connector_audio_mute_stream,
};
+static int drm_bridge_connector_hdmi_cec_enable(struct drm_connector *connector, bool enable)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ return bridge->funcs->hdmi_cec_enable(bridge, enable);
+}
+
+static int drm_bridge_connector_hdmi_cec_log_addr(struct drm_connector *connector, u8 logical_addr)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ return bridge->funcs->hdmi_cec_log_addr(bridge, logical_addr);
+}
+
+static int drm_bridge_connector_hdmi_cec_transmit(struct drm_connector *connector,
+ u8 attempts,
+ u32 signal_free_time,
+ struct cec_msg *msg)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ return bridge->funcs->hdmi_cec_transmit(bridge, attempts,
+ signal_free_time,
+ msg);
+}
+
+static int drm_bridge_connector_hdmi_cec_init(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ bridge = bridge_connector->bridge_hdmi_cec;
+
+ if (!bridge->funcs->hdmi_cec_init)
+ return 0;
+
+ return bridge->funcs->hdmi_cec_init(connector, bridge);
+}
+
+static const struct drm_connector_hdmi_cec_funcs drm_bridge_connector_hdmi_cec_funcs = {
+ .init = drm_bridge_connector_hdmi_cec_init,
+ .enable = drm_bridge_connector_hdmi_cec_enable,
+ .log_addr = drm_bridge_connector_hdmi_cec_log_addr,
+ .transmit = drm_bridge_connector_hdmi_cec_transmit,
+};
+
/* -----------------------------------------------------------------------------
* Bridge Connector Initialisation
*/
@@ -662,6 +730,25 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
bridge_connector->bridge_dp_audio = bridge;
}
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
+ if (bridge_connector->bridge_hdmi_cec)
+ return ERR_PTR(-EBUSY);
+
+ bridge_connector->bridge_hdmi_cec = bridge;
+ }
+
+ if (bridge->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
+ if (bridge_connector->bridge_hdmi_cec)
+ return ERR_PTR(-EBUSY);
+
+ bridge_connector->bridge_hdmi_cec = bridge;
+
+ if (!bridge->funcs->hdmi_cec_enable ||
+ !bridge->funcs->hdmi_cec_log_addr ||
+ !bridge->funcs->hdmi_cec_transmit)
+ return ERR_PTR(-EINVAL);
+ }
+
if (!drm_bridge_get_next_bridge(bridge))
connector_type = bridge->type;
@@ -717,12 +804,33 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
ret = drm_connector_hdmi_audio_init(connector, dev,
&drm_bridge_connector_hdmi_audio_funcs,
bridge->hdmi_audio_max_i2s_playback_channels,
+ bridge->hdmi_audio_i2s_formats,
bridge->hdmi_audio_spdif_playback,
bridge->hdmi_audio_dai_port);
if (ret)
return ERR_PTR(ret);
}
+ if (bridge_connector->bridge_hdmi_cec &&
+ bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER) {
+ ret = drmm_connector_hdmi_cec_notifier_register(connector,
+ NULL,
+ bridge->hdmi_cec_dev);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ if (bridge_connector->bridge_hdmi_cec &&
+ bridge_connector->bridge_hdmi_cec->ops & DRM_BRIDGE_OP_HDMI_CEC_ADAPTER) {
+ ret = drmm_connector_hdmi_cec_register(connector,
+ &drm_bridge_connector_hdmi_cec_funcs,
+ bridge->hdmi_cec_adapter_name,
+ bridge->hdmi_cec_available_las,
+ bridge->hdmi_cec_dev);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
if (bridge_connector->bridge_hpd)
diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c
index ec7eac6b595f..7b9afcf48836 100644
--- a/drivers/gpu/drm/display/drm_dp_aux_bus.c
+++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c
@@ -12,6 +12,7 @@
* to perform transactions on that bus.
*/
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index ed31471bd0e2..3b50d817c839 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -5,6 +5,7 @@
* Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index 632358b23b33..385a1bfdb272 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -24,6 +24,7 @@
#include <linux/delay.h>
#include <linux/dynamic_debug.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/iopoll.h>
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index a89f38fd3218..64e5c176d5cc 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -23,6 +23,7 @@
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c
index 076edf161048..8a4ef5438f35 100644
--- a/drivers/gpu/drm/display/drm_dp_tunnel.c
+++ b/drivers/gpu/drm/display/drm_dp_tunnel.c
@@ -3,6 +3,7 @@
* Copyright © 2023 Intel Corporation
*/
+#include <linux/export.h>
#include <linux/ref_tracker.h>
#include <linux/types.h>
diff --git a/drivers/gpu/drm/display/drm_dsc_helper.c b/drivers/gpu/drm/display/drm_dsc_helper.c
index 6900f4dac520..05996c526a8a 100644
--- a/drivers/gpu/drm/display/drm_dsc_helper.c
+++ b/drivers/gpu/drm/display/drm_dsc_helper.c
@@ -6,6 +6,7 @@
* Manasi Navare <manasi.d.navare@intel.com>
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
index ae8a0cf595fc..7d78b02c1446 100644
--- a/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_audio_helper.c
@@ -3,6 +3,7 @@
* Copyright (c) 2024 Linaro Ltd
*/
+#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
@@ -143,6 +144,7 @@ static const struct hdmi_codec_ops drm_connector_hdmi_audio_ops = {
* @hdmi_codec_dev: device to be used as a parent for the HDMI Codec
* @funcs: callbacks for this HDMI Codec
* @max_i2s_playback_channels: maximum number of playback I2S channels
+ * @i2s_formats: set of I2S formats (use 0 for a bus-specific set)
* @spdif_playback: set if HDMI codec has S/PDIF playback port
* @dai_port: sound DAI port, -1 if it is not enabled
*
@@ -155,6 +157,7 @@ int drm_connector_hdmi_audio_init(struct drm_connector *connector,
struct device *hdmi_codec_dev,
const struct drm_connector_hdmi_audio_funcs *funcs,
unsigned int max_i2s_playback_channels,
+ u64 i2s_formats,
bool spdif_playback,
int dai_port)
{
@@ -162,6 +165,7 @@ int drm_connector_hdmi_audio_init(struct drm_connector *connector,
.ops = &drm_connector_hdmi_audio_ops,
.max_i2s_channels = max_i2s_playback_channels,
.i2s = !!max_i2s_playback_channels,
+ .i2s_formats = i2s_formats,
.spdif = spdif_playback,
.no_i2s_capture = true,
.no_spdif_capture = true,
diff --git a/drivers/gpu/drm/display/drm_hdmi_cec_helper.c b/drivers/gpu/drm/display/drm_hdmi_cec_helper.c
new file mode 100644
index 000000000000..b4273c3522fa
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_hdmi_cec_helper.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2024 Linaro Ltd
+ */
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_managed.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+
+#include <media/cec.h>
+
+struct drm_connector_hdmi_cec_data {
+ struct cec_adapter *adapter;
+ const struct drm_connector_hdmi_cec_funcs *funcs;
+};
+
+static int drm_connector_hdmi_cec_adap_enable(struct cec_adapter *adap, bool enable)
+{
+ struct drm_connector *connector = cec_get_drvdata(adap);
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ return data->funcs->enable(connector, enable);
+}
+
+static int drm_connector_hdmi_cec_adap_log_addr(struct cec_adapter *adap, u8 logical_addr)
+{
+ struct drm_connector *connector = cec_get_drvdata(adap);
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ return data->funcs->log_addr(connector, logical_addr);
+}
+
+static int drm_connector_hdmi_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg)
+{
+ struct drm_connector *connector = cec_get_drvdata(adap);
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ return data->funcs->transmit(connector, attempts, signal_free_time, msg);
+}
+
+static const struct cec_adap_ops drm_connector_hdmi_cec_adap_ops = {
+ .adap_enable = drm_connector_hdmi_cec_adap_enable,
+ .adap_log_addr = drm_connector_hdmi_cec_adap_log_addr,
+ .adap_transmit = drm_connector_hdmi_cec_adap_transmit,
+};
+
+static void drm_connector_hdmi_cec_adapter_phys_addr_invalidate(struct drm_connector *connector)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_phys_addr_invalidate(data->adapter);
+}
+
+static void drm_connector_hdmi_cec_adapter_phys_addr_set(struct drm_connector *connector,
+ u16 addr)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_s_phys_addr(data->adapter, addr, false);
+}
+
+static void drm_connector_hdmi_cec_adapter_unregister(struct drm_device *dev, void *res)
+{
+ struct drm_connector *connector = res;
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_delete_adapter(data->adapter);
+
+ if (data->funcs->uninit)
+ data->funcs->uninit(connector);
+
+ kfree(data);
+ connector->cec.data = NULL;
+}
+
+static struct drm_connector_cec_funcs drm_connector_hdmi_cec_adapter_funcs = {
+ .phys_addr_invalidate = drm_connector_hdmi_cec_adapter_phys_addr_invalidate,
+ .phys_addr_set = drm_connector_hdmi_cec_adapter_phys_addr_set,
+};
+
+int drmm_connector_hdmi_cec_register(struct drm_connector *connector,
+ const struct drm_connector_hdmi_cec_funcs *funcs,
+ const char *name,
+ u8 available_las,
+ struct device *dev)
+{
+ struct drm_connector_hdmi_cec_data *data;
+ struct cec_connector_info conn_info;
+ struct cec_adapter *cec_adap;
+ int ret;
+
+ if (!funcs->init || !funcs->enable || !funcs->log_addr || !funcs->transmit)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->funcs = funcs;
+
+ cec_adap = cec_allocate_adapter(&drm_connector_hdmi_cec_adap_ops, connector, name,
+ CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO,
+ available_las ? : CEC_MAX_LOG_ADDRS);
+ ret = PTR_ERR_OR_ZERO(cec_adap);
+ if (ret < 0)
+ goto err_free;
+
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+ cec_s_conn_info(cec_adap, &conn_info);
+
+ data->adapter = cec_adap;
+
+ mutex_lock(&connector->cec.mutex);
+
+ connector->cec.data = data;
+ connector->cec.funcs = &drm_connector_hdmi_cec_adapter_funcs;
+
+ ret = funcs->init(connector);
+ if (ret < 0)
+ goto err_delete_adapter;
+
+ /*
+ * NOTE: the CEC adapter will be unregistered by drmm cleanup from
+ * drm_managed_release(), which is called from drm_dev_release()
+ * during device unbind.
+ *
+ * However, the CEC framework cleans up the CEC adapter only when the
+ * last user has closed its file descriptor, so we don't need to handle
+ * it in DRM.
+ *
+	 * Before that, the CEC framework makes sure that even if userspace
+	 * still holds the CEC device open, all calls are short-circuited via
+	 * cec_is_registered(), so there is no access to the freed memory.
+ */
+ ret = cec_register_adapter(cec_adap, dev);
+ if (ret < 0)
+ goto err_delete_adapter;
+
+ mutex_unlock(&connector->cec.mutex);
+
+ return drmm_add_action_or_reset(connector->dev,
+ drm_connector_hdmi_cec_adapter_unregister,
+ connector);
+
+err_delete_adapter:
+ cec_delete_adapter(cec_adap);
+
+ connector->cec.data = NULL;
+
+ mutex_unlock(&connector->cec.mutex);
+
+err_free:
+ kfree(data);
+
+ return ret;
+}
+EXPORT_SYMBOL(drmm_connector_hdmi_cec_register);
+
+void drm_connector_hdmi_cec_received_msg(struct drm_connector *connector,
+ struct cec_msg *msg)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_received_msg(data->adapter, msg);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_cec_received_msg);
+
+void drm_connector_hdmi_cec_transmit_attempt_done(struct drm_connector *connector,
+ u8 status)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_transmit_attempt_done(data->adapter, status);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_cec_transmit_attempt_done);
+
+void drm_connector_hdmi_cec_transmit_done(struct drm_connector *connector,
+ u8 status,
+ u8 arb_lost_cnt, u8 nack_cnt,
+ u8 low_drive_cnt, u8 error_cnt)
+{
+ struct drm_connector_hdmi_cec_data *data = connector->cec.data;
+
+ cec_transmit_done(data->adapter, status,
+ arb_lost_cnt, nack_cnt, low_drive_cnt, error_cnt);
+}
+EXPORT_SYMBOL(drm_connector_hdmi_cec_transmit_done);
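For reference, a minimal sketch of how a connector or bridge driver might hook into the new adapter helper; the my_cec_* callbacks, the "my-cec" adapter name and the logical-address count are hypothetical placeholders, not part of this patch:

static const struct drm_connector_hdmi_cec_funcs my_cec_funcs = {
	.init = my_cec_init,		/* bring up the CEC block */
	.enable = my_cec_enable,	/* backs cec_adap_ops.adap_enable */
	.log_addr = my_cec_log_addr,	/* program one logical address */
	.transmit = my_cec_transmit,	/* queue a CEC message */
};

static int my_connector_cec_init(struct drm_connector *connector,
				 struct device *hdmi_dev)
{
	/* registration is drm-managed, so no explicit teardown is needed */
	return drmm_connector_hdmi_cec_register(connector, &my_cec_funcs,
						"my-cec", 4, hdmi_dev);
}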
diff --git a/drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c b/drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c
new file mode 100644
index 000000000000..31b8e4a93e24
--- /dev/null
+++ b/drivers/gpu/drm/display/drm_hdmi_cec_notifier_helper.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (c) 2024 Linaro Ltd
+ */
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_managed.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
+
+#include <linux/export.h>
+#include <linux/mutex.h>
+
+#include <media/cec.h>
+#include <media/cec-notifier.h>
+
+static void drm_connector_hdmi_cec_notifier_phys_addr_invalidate(struct drm_connector *connector)
+{
+ cec_notifier_phys_addr_invalidate(connector->cec.data);
+}
+
+static void drm_connector_hdmi_cec_notifier_phys_addr_set(struct drm_connector *connector,
+ u16 addr)
+{
+ cec_notifier_set_phys_addr(connector->cec.data, addr);
+}
+
+static void drm_connector_hdmi_cec_notifier_unregister(struct drm_device *dev, void *res)
+{
+ struct drm_connector *connector = res;
+
+ cec_notifier_conn_unregister(connector->cec.data);
+ connector->cec.data = NULL;
+}
+
+static const struct drm_connector_cec_funcs drm_connector_cec_notifier_funcs = {
+ .phys_addr_invalidate = drm_connector_hdmi_cec_notifier_phys_addr_invalidate,
+ .phys_addr_set = drm_connector_hdmi_cec_notifier_phys_addr_set,
+};
+
+int drmm_connector_hdmi_cec_notifier_register(struct drm_connector *connector,
+ const char *port_name,
+ struct device *dev)
+{
+ struct cec_connector_info conn_info;
+ struct cec_notifier *notifier;
+
+ cec_fill_conn_info_from_drm(&conn_info, connector);
+
+ notifier = cec_notifier_conn_register(dev, port_name, &conn_info);
+ if (!notifier)
+ return -ENOMEM;
+
+ mutex_lock(&connector->cec.mutex);
+
+ connector->cec.data = notifier;
+ connector->cec.funcs = &drm_connector_cec_notifier_funcs;
+
+ mutex_unlock(&connector->cec.mutex);
+
+ return drmm_add_action_or_reset(connector->dev,
+ drm_connector_hdmi_cec_notifier_unregister,
+ connector);
+}
+EXPORT_SYMBOL(drmm_connector_hdmi_cec_notifier_register);
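The notifier variant only forwards physical-address changes to an external CEC adapter. A hedged usage sketch, with my_encoder_cec_init and hdmi_dev as placeholder names:

static int my_encoder_cec_init(struct drm_connector *connector,
			       struct device *hdmi_dev)
{
	/* a NULL port name selects the default port of hdmi_dev */
	return drmm_connector_hdmi_cec_notifier_register(connector, NULL,
							 hdmi_dev);
}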
diff --git a/drivers/gpu/drm/display/drm_hdmi_helper.c b/drivers/gpu/drm/display/drm_hdmi_helper.c
index 855cb02b827d..6063c155bdea 100644
--- a/drivers/gpu/drm/display/drm_hdmi_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_helper.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/module.h>
#include <drm/display/drm_hdmi_helper.h>
diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
index d9d9948b29e9..a561f124be99 100644
--- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c
+++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c
@@ -1,11 +1,15 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
+#include <drm/drm_modes.h>
#include <drm/drm_print.h>
#include <drm/display/drm_hdmi_audio_helper.h>
+#include <drm/display/drm_hdmi_cec_helper.h>
#include <drm/display/drm_hdmi_helper.h>
#include <drm/display/drm_hdmi_state_helper.h>
@@ -407,6 +411,11 @@ sink_supports_format_bpc(const struct drm_connector *connector,
return false;
}
+ if (drm_mode_is_420_only(info, mode) && format != HDMI_COLORSPACE_YUV420) {
+ drm_dbg_kms(dev, "Mode can be only supported in YUV420 format.\n");
+ return false;
+ }
+
switch (format) {
case HDMI_COLORSPACE_RGB:
drm_dbg_kms(dev, "RGB Format, checking the constraints.\n");
@@ -437,9 +446,36 @@ sink_supports_format_bpc(const struct drm_connector *connector,
return true;
case HDMI_COLORSPACE_YUV420:
- /* TODO: YUV420 is unsupported at the moment. */
- drm_dbg_kms(dev, "YUV420 format isn't supported yet.\n");
- return false;
+ drm_dbg_kms(dev, "YUV420 format, checking the constraints.\n");
+
+ if (!(info->color_formats & DRM_COLOR_FORMAT_YCBCR420)) {
+ drm_dbg_kms(dev, "Sink doesn't support YUV420.\n");
+ return false;
+ }
+
+ if (!drm_mode_is_420(info, mode)) {
+ drm_dbg_kms(dev, "Mode cannot be supported in YUV420 format.\n");
+ return false;
+ }
+
+ if (bpc == 10 && !(info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)) {
+ drm_dbg_kms(dev, "10 BPC but sink doesn't support Deep Color 30.\n");
+ return false;
+ }
+
+ if (bpc == 12 && !(info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)) {
+ drm_dbg_kms(dev, "12 BPC but sink doesn't support Deep Color 36.\n");
+ return false;
+ }
+
+ if (bpc == 16 && !(info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)) {
+ drm_dbg_kms(dev, "16 BPC but sink doesn't support Deep Color 48.\n");
+ return false;
+ }
+
+ drm_dbg_kms(dev, "YUV420 format supported in that configuration.\n");
+
+ return true;
case HDMI_COLORSPACE_YUV422:
drm_dbg_kms(dev, "YUV422 format, checking the constraints.\n");
@@ -545,8 +581,9 @@ hdmi_try_format_bpc(const struct drm_connector *connector,
struct drm_device *dev = connector->dev;
int ret;
- drm_dbg_kms(dev, "Trying %s output format\n",
- drm_hdmi_connector_get_output_format_name(fmt));
+ drm_dbg_kms(dev, "Trying %s output format with %u bpc\n",
+ drm_hdmi_connector_get_output_format_name(fmt),
+ bpc);
if (!sink_supports_format_bpc(connector, info, mode, fmt, bpc)) {
drm_dbg_kms(dev, "%s output format not supported with %u bpc\n",
@@ -563,7 +600,7 @@ hdmi_try_format_bpc(const struct drm_connector *connector,
return false;
}
- drm_dbg_kms(dev, "%s output format supported with %u (TMDS char rate: %llu Hz)\n",
+ drm_dbg_kms(dev, "%s output format supported with %u bpc (TMDS char rate: %llu Hz)\n",
drm_hdmi_connector_get_output_format_name(fmt),
bpc, conn_state->hdmi.tmds_char_rate);
@@ -571,23 +608,35 @@ hdmi_try_format_bpc(const struct drm_connector *connector,
}
static int
-hdmi_compute_format(const struct drm_connector *connector,
- struct drm_connector_state *conn_state,
- const struct drm_display_mode *mode,
- unsigned int bpc)
+hdmi_compute_format_bpc(const struct drm_connector *connector,
+ struct drm_connector_state *conn_state,
+ const struct drm_display_mode *mode,
+ unsigned int max_bpc, enum hdmi_colorspace fmt)
{
struct drm_device *dev = connector->dev;
+ unsigned int bpc;
+ int ret;
+
+ for (bpc = max_bpc; bpc >= 8; bpc -= 2) {
+ ret = hdmi_try_format_bpc(connector, conn_state, mode, bpc, fmt);
+ if (!ret)
+ continue;
+
+ conn_state->hdmi.output_bpc = bpc;
+ conn_state->hdmi.output_format = fmt;
+
+ drm_dbg_kms(dev,
+ "Mode %ux%u @ %uHz: Found configuration: bpc: %u, fmt: %s, clock: %llu\n",
+ mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode),
+ conn_state->hdmi.output_bpc,
+ drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format),
+ conn_state->hdmi.tmds_char_rate);
- /*
- * TODO: Add support for YCbCr420 output for HDMI 2.0 capable
- * devices, for modes that only support YCbCr420.
- */
- if (hdmi_try_format_bpc(connector, conn_state, mode, bpc, HDMI_COLORSPACE_RGB)) {
- conn_state->hdmi.output_format = HDMI_COLORSPACE_RGB;
return 0;
}
- drm_dbg_kms(dev, "Failed. No Format Supported for that bpc count.\n");
+ drm_dbg_kms(dev, "Failed. %s output format not supported for any bpc count.\n",
+ drm_hdmi_connector_get_output_format_name(fmt));
return -EINVAL;
}
@@ -597,33 +646,29 @@ hdmi_compute_config(const struct drm_connector *connector,
struct drm_connector_state *conn_state,
const struct drm_display_mode *mode)
{
- struct drm_device *dev = connector->dev;
unsigned int max_bpc = clamp_t(unsigned int,
conn_state->max_bpc,
8, connector->max_bpc);
- unsigned int bpc;
int ret;
- for (bpc = max_bpc; bpc >= 8; bpc -= 2) {
- drm_dbg_kms(dev, "Trying with a %d bpc output\n", bpc);
-
- ret = hdmi_compute_format(connector, conn_state, mode, bpc);
- if (ret)
- continue;
-
- conn_state->hdmi.output_bpc = bpc;
-
- drm_dbg_kms(dev,
- "Mode %ux%u @ %uHz: Found configuration: bpc: %u, fmt: %s, clock: %llu\n",
- mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode),
- conn_state->hdmi.output_bpc,
- drm_hdmi_connector_get_output_format_name(conn_state->hdmi.output_format),
- conn_state->hdmi.tmds_char_rate);
-
- return 0;
+ ret = hdmi_compute_format_bpc(connector, conn_state, mode, max_bpc,
+ HDMI_COLORSPACE_RGB);
+ if (ret) {
+ if (connector->ycbcr_420_allowed) {
+ ret = hdmi_compute_format_bpc(connector, conn_state,
+ mode, max_bpc,
+ HDMI_COLORSPACE_YUV420);
+ if (ret)
+ drm_dbg_kms(connector->dev,
+ "YUV420 output format doesn't work.\n");
+ } else {
+ drm_dbg_kms(connector->dev,
+ "YUV420 output format not allowed for connector.\n");
+ ret = -EINVAL;
+ }
}
- return -EINVAL;
+ return ret;
}
static int hdmi_generate_avi_infoframe(const struct drm_connector *connector,
@@ -798,12 +843,12 @@ int drm_atomic_helper_connector_hdmi_check(struct drm_connector *connector,
if (!new_conn_state->crtc || !new_conn_state->best_encoder)
return 0;
- new_conn_state->hdmi.is_limited_range = hdmi_is_limited_range(connector, new_conn_state);
-
ret = hdmi_compute_config(connector, new_conn_state, mode);
if (ret)
return ret;
+ new_conn_state->hdmi.is_limited_range = hdmi_is_limited_range(connector, new_conn_state);
+
ret = hdmi_generate_infoframes(connector, new_conn_state);
if (ret)
return ret;
@@ -1081,9 +1126,10 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
const struct drm_edid *drm_edid;
if (status == connector_status_disconnected) {
- // TODO: also handle CEC and scramber, HDMI sink disconnected.
+ // TODO: also handle scrambler, HDMI sink disconnected.
drm_connector_hdmi_audio_plugged_notify(connector, false);
drm_edid_connector_update(connector, NULL);
+ drm_connector_cec_phys_addr_invalidate(connector);
return;
}
@@ -1097,8 +1143,9 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector,
drm_edid_free(drm_edid);
if (status == connector_status_connected) {
- // TODO: also handle CEC and scramber, HDMI sink is now connected.
+ // TODO: also handle scrambler, HDMI sink is now connected.
drm_connector_hdmi_audio_plugged_notify(connector, true);
+ drm_connector_cec_phys_addr_set(connector);
}
}
diff --git a/drivers/gpu/drm/display/drm_scdc_helper.c b/drivers/gpu/drm/display/drm_scdc_helper.c
index 6d2f244e5830..df878aad4a36 100644
--- a/drivers/gpu/drm/display/drm_scdc_helper.c
+++ b/drivers/gpu/drm/display/drm_scdc_helper.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 0138cf0b8b63..cd15cf52f0c9 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -26,7 +26,7 @@
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
-
+#include <linux/export.h>
#include <linux/sync_file.h>
#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ee64ca1b1bec..ef56b474acf5 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -25,6 +25,7 @@
* Daniel Vetter <daniel.vetter@ffwll.ch>
*/
+#include <linux/export.h>
#include <linux/dma-fence.h>
#include <linux/ktime.h>
@@ -1160,11 +1161,10 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
}
static void
-disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
+encoder_bridge_disable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_connector *connector;
struct drm_connector_state *old_conn_state, *new_conn_state;
- struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
@@ -1224,9 +1224,15 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
else if (funcs->dpms)
funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
-
- drm_atomic_bridge_chain_post_disable(bridge, state);
}
+}
+
+static void
+crtc_disable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
const struct drm_crtc_helper_funcs *funcs;
@@ -1274,6 +1280,68 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
}
}
+static void
+encoder_bridge_post_disable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
+
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state, new_conn_state, i) {
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+
+ /*
+ * Shut down everything that's in the changeset and currently
+ * still on. So need to check the old, saved state.
+ */
+ if (!old_conn_state->crtc)
+ continue;
+
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, old_conn_state->crtc);
+
+ if (new_conn_state->crtc)
+ new_crtc_state = drm_atomic_get_new_crtc_state(state,
+ new_conn_state->crtc);
+ else
+ new_crtc_state = NULL;
+
+ if (!crtc_needs_disable(old_crtc_state, new_crtc_state) ||
+ !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
+ continue;
+
+ encoder = old_conn_state->best_encoder;
+
+ /*
+ * We shouldn't get this far if we didn't previously have
+ * an encoder.. but WARN_ON() rather than explode.
+ */
+ if (WARN_ON(!encoder))
+ continue;
+
+ drm_dbg_atomic(dev, "post-disabling bridges [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call disable hooks twice.
+ */
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_atomic_bridge_chain_post_disable(bridge, state);
+ }
+}
+
+static void
+disable_outputs(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ encoder_bridge_disable(dev, state);
+
+ crtc_disable(dev, state);
+
+ encoder_bridge_post_disable(dev, state);
+}
+
/**
* drm_atomic_helper_update_legacy_modeset_state - update legacy modeset state
* @dev: DRM device
@@ -1483,28 +1551,44 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
}
}
-/**
- * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
- * @dev: DRM device
- * @state: atomic state object being committed
- *
- * This function enables all the outputs with the new configuration which had to
- * be turned off for the update.
- *
- * For compatibility with legacy CRTC helpers this should be called after
- * drm_atomic_helper_commit_planes(), which is what the default commit function
- * does. But drivers with different needs can group the modeset commits together
- * and do the plane commits at the end. This is useful for drivers doing runtime
- * PM since planes updates then only happen when the CRTC is actually enabled.
- */
-void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
- struct drm_atomic_state *state)
+static void
+encoder_bridge_pre_enable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
+
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+
+ if (!new_conn_state->best_encoder)
+ continue;
+
+ if (!new_conn_state->crtc->state->active ||
+ !drm_atomic_crtc_needs_modeset(new_conn_state->crtc->state))
+ continue;
+
+ encoder = new_conn_state->best_encoder;
+
+ drm_dbg_atomic(dev, "pre-enabling bridges [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call enable hooks twice.
+ */
+ bridge = drm_bridge_chain_get_first_bridge(encoder);
+ drm_atomic_bridge_chain_pre_enable(bridge, state);
+ }
+}
+
+static void
+crtc_enable(struct drm_device *dev, struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
- struct drm_connector *connector;
- struct drm_connector_state *new_conn_state;
int i;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
@@ -1528,6 +1612,14 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
funcs->commit(crtc);
}
}
+}
+
+static void
+encoder_bridge_enable(struct drm_device *dev, struct drm_atomic_state *state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
+ int i;
for_each_new_connector_in_state(state, connector, new_conn_state, i) {
const struct drm_encoder_helper_funcs *funcs;
@@ -1552,7 +1644,6 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
* it away), so we won't call enable hooks twice.
*/
bridge = drm_bridge_chain_get_first_bridge(encoder);
- drm_atomic_bridge_chain_pre_enable(bridge, state);
if (funcs) {
if (funcs->atomic_enable)
@@ -1565,6 +1656,30 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
drm_atomic_bridge_chain_enable(bridge, state);
}
+}
+
+/**
+ * drm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
+ * @dev: DRM device
+ * @state: atomic state object being committed
+ *
+ * This function enables all the outputs with the new configuration which had to
+ * be turned off for the update.
+ *
+ * For compatibility with legacy CRTC helpers this should be called after
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since plane updates then only happen when the CRTC is actually enabled.
+ */
+void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ encoder_bridge_pre_enable(dev, state);
+
+ crtc_enable(dev, state);
+
+ encoder_bridge_enable(dev, state);
drm_atomic_helper_commit_writebacks(dev, state);
}
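The split into encoder_bridge_pre_enable(), crtc_enable() and encoder_bridge_enable() (and the matching disable-path helpers) does not change the externally visible ordering. As an illustration, a driver-provided commit tail that keeps relying on that ordering looks the same as before; this mirrors the stock drm_atomic_helper_commit_tail() and is a sketch only:

static void my_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;

	/* bridges disabled, CRTCs shut down, bridges post-disabled */
	drm_atomic_helper_commit_modeset_disables(dev, state);
	drm_atomic_helper_commit_planes(dev, state, 0);
	/* bridges pre-enabled, CRTCs enabled, bridges enabled */
	drm_atomic_helper_commit_modeset_enables(dev, state);
	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_wait_for_vblanks(dev, state);
	drm_atomic_helper_cleanup_planes(dev, state);
}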
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 519228eb1095..7142e163e618 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -37,6 +37,7 @@
#include <drm/drm_vblank.h>
#include <drm/drm_writeback.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-fence.h>
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index c2726af6698e..ecc73d52bfae 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -36,6 +36,7 @@
#include <drm/drm_writeback.h>
#include <drm/drm_vblank.h>
+#include <linux/export.h>
#include <linux/dma-fence.h>
#include <linux/uaccess.h>
#include <linux/sync_file.h>
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 22aa015df387..a2556d16bed6 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -28,6 +28,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <drm/drm_auth.h>
@@ -95,7 +96,7 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
struct drm_auth *auth = data;
int ret = 0;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
if (!file_priv->magic) {
ret = idr_alloc(&file_priv->master->magic_map, file_priv,
1, 0, GFP_KERNEL);
@@ -103,7 +104,6 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
file_priv->magic = ret;
}
auth->magic = file_priv->magic;
- mutex_unlock(&dev->master_mutex);
drm_dbg_core(dev, "%u\n", auth->magic);
@@ -118,13 +118,12 @@ int drm_authmagic(struct drm_device *dev, void *data,
drm_dbg_core(dev, "%u\n", auth->magic);
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
file = idr_find(&file_priv->master->magic_map, auth->magic);
if (file) {
file->authenticated = 1;
idr_replace(&file_priv->master->magic_map, NULL, auth->magic);
}
- mutex_unlock(&dev->master_mutex);
return file ? 0 : -EINVAL;
}
@@ -248,41 +247,33 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
{
int ret;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
ret = drm_master_check_perm(dev, file_priv);
if (ret)
- goto out_unlock;
+ return ret;
if (drm_is_current_master_locked(file_priv))
- goto out_unlock;
+ return ret;
- if (dev->master) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ if (dev->master)
+ return -EBUSY;
- if (!file_priv->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!file_priv->master)
+ return -EINVAL;
- if (!file_priv->is_master) {
- ret = drm_new_set_master(dev, file_priv);
- goto out_unlock;
- }
+ if (!file_priv->is_master)
+ return drm_new_set_master(dev, file_priv);
if (file_priv->master->lessor != NULL) {
drm_dbg_lease(dev,
"Attempt to set lessee %d as master\n",
file_priv->master->lessee_id);
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
drm_set_master(dev, file_priv, false);
-out_unlock:
- mutex_unlock(&dev->master_mutex);
+
return ret;
}
@@ -299,33 +290,27 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
{
int ret;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
ret = drm_master_check_perm(dev, file_priv);
if (ret)
- goto out_unlock;
+ return ret;
- if (!drm_is_current_master_locked(file_priv)) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!drm_is_current_master_locked(file_priv))
+ return -EINVAL;
- if (!dev->master) {
- ret = -EINVAL;
- goto out_unlock;
- }
+ if (!dev->master)
+ return -EINVAL;
if (file_priv->master->lessor != NULL) {
drm_dbg_lease(dev,
"Attempt to drop lessee %d as master\n",
file_priv->master->lessee_id);
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
drm_drop_master(dev, file_priv);
-out_unlock:
- mutex_unlock(&dev->master_mutex);
+
return ret;
}
@@ -337,7 +322,7 @@ int drm_master_open(struct drm_file *file_priv)
/* if there is no current master make this fd it, but do not create
* any master object for render clients
*/
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
if (!dev->master) {
ret = drm_new_set_master(dev, file_priv);
} else {
@@ -345,7 +330,6 @@ int drm_master_open(struct drm_file *file_priv)
file_priv->master = drm_master_get(dev->master);
spin_unlock(&file_priv->master_lookup_lock);
}
- mutex_unlock(&dev->master_mutex);
return ret;
}
@@ -355,7 +339,7 @@ void drm_master_release(struct drm_file *file_priv)
struct drm_device *dev = file_priv->minor->dev;
struct drm_master *master;
- mutex_lock(&dev->master_mutex);
+ guard(mutex)(&dev->master_mutex);
master = file_priv->master;
if (file_priv->magic)
idr_remove(&file_priv->master->magic_map, file_priv->magic);
@@ -376,7 +360,6 @@ out:
/* drop the master reference held by the file priv */
if (file_priv->master)
drm_master_put(&file_priv->master);
- mutex_unlock(&dev->master_mutex);
}
/**
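The conversions above use the scope-based guard() helper from <linux/cleanup.h>, which releases the mutex on every return path and lets the out_unlock labels go away. A standalone sketch with a hypothetical struct my_dev:

struct my_dev {
	struct mutex lock;
	bool ready;
	unsigned int count;
};

static int my_op(struct my_dev *mdev)
{
	guard(mutex)(&mdev->lock);	/* unlocked automatically at scope exit */

	if (!mdev->ready)
		return -EINVAL;		/* unlocks here */

	mdev->count++;
	return 0;			/* and here */
}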
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index b4c89ec01998..d6ce7b4c019f 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -23,6 +23,7 @@
#include <linux/debugfs.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/mutex.h>
@@ -203,6 +204,8 @@ static void __drm_bridge_free(struct kref *kref)
{
struct drm_bridge *bridge = container_of(kref, struct drm_bridge, refcount);
+ if (bridge->funcs->destroy)
+ bridge->funcs->destroy(bridge);
kfree(bridge->container);
}
@@ -1392,6 +1395,23 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np)
EXPORT_SYMBOL(of_drm_find_bridge);
#endif
+/**
+ * devm_drm_put_bridge - Release a bridge reference obtained via devm
+ * @dev: device that got the bridge via devm
+ * @bridge: pointer to a struct drm_bridge obtained via devm
+ *
+ * Same as drm_bridge_put() for bridge pointers obtained via devm functions
+ * such as devm_drm_bridge_alloc().
+ *
+ * This function is a temporary workaround and MUST NOT be used. Manual
+ * handling of bridge lifetime is inherently unsafe.
+ */
+void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge)
+{
+ devm_release_action(dev, drm_bridge_put_void, bridge);
+}
+EXPORT_SYMBOL(devm_drm_put_bridge);
+
static void drm_bridge_debugfs_show_bridge(struct drm_printer *p,
struct drm_bridge *bridge,
unsigned int idx)
diff --git a/drivers/gpu/drm/drm_bridge_helper.c b/drivers/gpu/drm/drm_bridge_helper.c
index af80d2496194..420f29cf3e54 100644
--- a/drivers/gpu/drm/drm_bridge_helper.c
+++ b/drivers/gpu/drm/drm_bridge_helper.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 241c855f891f..a1e652b7631d 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -5,6 +5,7 @@
#include <kunit/test-bug.h>
+#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/sizes.h>
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index f1de7faf9fb4..3fa38d4ac70b 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -3,6 +3,7 @@
* Copyright 2018 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/iosys-map.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -303,34 +304,17 @@ EXPORT_SYMBOL(drm_client_buffer_vunmap_local);
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
-int
-drm_client_buffer_vmap(struct drm_client_buffer *buffer,
- struct iosys_map *map_copy)
+int drm_client_buffer_vmap(struct drm_client_buffer *buffer,
+ struct iosys_map *map_copy)
{
- struct drm_gem_object *gem = buffer->gem;
- struct iosys_map *map = &buffer->map;
int ret;
- drm_gem_lock(gem);
-
- ret = drm_gem_pin_locked(gem);
- if (ret)
- goto err_drm_gem_pin_locked;
- ret = drm_gem_vmap_locked(gem, map);
+ ret = drm_gem_vmap(buffer->gem, &buffer->map);
if (ret)
- goto err_drm_gem_vmap;
-
- drm_gem_unlock(gem);
-
- *map_copy = *map;
+ return ret;
+ *map_copy = buffer->map;
return 0;
-
-err_drm_gem_vmap:
- drm_gem_unpin_locked(buffer->gem);
-err_drm_gem_pin_locked:
- drm_gem_unlock(gem);
- return ret;
}
EXPORT_SYMBOL(drm_client_buffer_vmap);
@@ -344,13 +328,7 @@ EXPORT_SYMBOL(drm_client_buffer_vmap);
*/
void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
{
- struct drm_gem_object *gem = buffer->gem;
- struct iosys_map *map = &buffer->map;
-
- drm_gem_lock(gem);
- drm_gem_vunmap_locked(gem, map);
- drm_gem_unpin_locked(gem);
- drm_gem_unlock(gem);
+ drm_gem_vunmap(buffer->gem, &buffer->map);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap);
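With this simplification, drm_client_buffer_vmap()/vunmap() behave like thin drm_gem_vmap()/vunmap() wrappers that also cache the mapping. A hedged usage sketch; buffer stands for a previously allocated struct drm_client_buffer:

struct iosys_map map;
int ret;

ret = drm_client_buffer_vmap(buffer, &map);
if (!ret) {
	/* clear the first scanline of the client framebuffer */
	iosys_map_memset(&map, 0, 0, buffer->fb->pitches[0]);
	drm_client_buffer_vunmap(buffer);
}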
diff --git a/drivers/gpu/drm/drm_client_event.c b/drivers/gpu/drm/drm_client_event.c
index bd93cd93d519..c83196ad8b59 100644
--- a/drivers/gpu/drm/drm_client_event.c
+++ b/drivers/gpu/drm/drm_client_event.c
@@ -3,6 +3,7 @@
* Copyright 2018 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 0f9d5ba36c81..9c2c3b0c8c47 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -8,6 +8,8 @@
*/
#include "drm/drm_modeset_lock.h"
+
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c
index 3969dc548cff..37a3270bc3c2 100644
--- a/drivers/gpu/drm/drm_color_mgmt.c
+++ b/drivers/gpu/drm/drm_color_mgmt.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/uaccess.h>
#include <drm/drm_atomic.h>
@@ -28,6 +29,7 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
+#include <kunit/visibility.h>
#include "drm_crtc_internal.h"
@@ -494,6 +496,7 @@ const char *drm_get_color_encoding_name(enum drm_color_encoding encoding)
return color_encoding_name[encoding];
}
+EXPORT_SYMBOL_IF_KUNIT(drm_get_color_encoding_name);
/**
* drm_get_color_range_name - return a string for color range
@@ -509,6 +512,7 @@ const char *drm_get_color_range_name(enum drm_color_range range)
return color_range_name[range];
}
+EXPORT_SYMBOL_IF_KUNIT(drm_get_color_range_name);
/**
* drm_plane_create_color_properties - color encoding related plane properties
@@ -630,3 +634,209 @@ int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests)
return 0;
}
EXPORT_SYMBOL(drm_color_lut_check);
+
+/*
+ * Gamma-LUT programming
+ */
+
+/**
+ * drm_crtc_load_gamma_888 - Programs gamma ramp for RGB888-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The gamma ramp to program
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs the gamma ramp specified in @lut to hardware. The input gamma
+ * ramp must have 256 entries per color component.
+ */
+void drm_crtc_load_gamma_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ set_gamma(crtc, i, lut[i].red, lut[i].green, lut[i].blue);
+}
+EXPORT_SYMBOL(drm_crtc_load_gamma_888);
+
+/**
+ * drm_crtc_load_gamma_565_from_888 - Programs gamma ramp for RGB565-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The gamma ramp to program
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs the gamma ramp specified in @lut to hardware. The input gamma
+ * ramp must have 256 entries per color component. The helper interpolates
+ * the individual color components to reduce the number of entries to 5/6/5.
+ */
+void drm_crtc_load_gamma_565_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+ u16 r, g, b;
+
+ for (i = 0; i < 32; ++i) {
+ r = lut[i * 8 + i / 4].red;
+ g = lut[i * 4 + i / 16].green;
+ b = lut[i * 8 + i / 4].blue;
+ set_gamma(crtc, i, r, g, b);
+ }
+ /* Green has one more bit, so add padding with 0 for red and blue. */
+ for (i = 32; i < 64; ++i) {
+ g = lut[i * 4 + i / 16].green;
+ set_gamma(crtc, i, 0, g, 0);
+ }
+}
+EXPORT_SYMBOL(drm_crtc_load_gamma_565_from_888);
+
+/**
+ * drm_crtc_load_gamma_555_from_888 - Programs gamma ramp for RGB555-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The gamma ramp to program
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs the gamma ramp specified in @lut to hardware. The input gamma
+ * ramp must have 256 entries per color component. The helper interpolates
+ * the individual color components to reduce the number of entries to 5/5/5.
+ */
+void drm_crtc_load_gamma_555_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+ u16 r, g, b;
+
+ for (i = 0; i < 32; ++i) {
+ r = lut[i * 8 + i / 4].red;
+ g = lut[i * 8 + i / 4].green;
+ b = lut[i * 8 + i / 4].blue;
+ set_gamma(crtc, i, r, g, b);
+ }
+}
+EXPORT_SYMBOL(drm_crtc_load_gamma_555_from_888);
+
+static void fill_gamma_888(struct drm_crtc *crtc, unsigned int i, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_gamma)
+{
+ r = (r << 8) | r;
+ g = (g << 8) | g;
+ b = (b << 8) | b;
+
+ set_gamma(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_gamma_888 - Programs a default gamma ramp for RGB888-like formats
+ * @crtc: The displaying CRTC
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default gamma ramp to hardware.
+ */
+void drm_crtc_fill_gamma_888(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ fill_gamma_888(crtc, i, i, i, i, set_gamma);
+}
+EXPORT_SYMBOL(drm_crtc_fill_gamma_888);
+
+static void fill_gamma_565(struct drm_crtc *crtc, unsigned int i, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_gamma)
+{
+ r = (r << 11) | (r << 6) | (r << 1) | (r >> 4);
+ g = (g << 10) | (g << 4) | (g >> 2);
+ b = (b << 11) | (b << 6) | (b << 1) | (b >> 4);
+
+ set_gamma(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_gamma_565 - Programs a default gamma ramp for RGB565-like formats
+ * @crtc: The displaying CRTC
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default gamma ramp to hardware.
+ */
+void drm_crtc_fill_gamma_565(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 32; ++i)
+ fill_gamma_565(crtc, i, i, i, i, set_gamma);
+ /* Green has one more bit, so add padding with 0 for red and blue. */
+ for (i = 32; i < 64; ++i)
+ fill_gamma_565(crtc, i, 0, i, 0, set_gamma);
+}
+EXPORT_SYMBOL(drm_crtc_fill_gamma_565);
+
+static void fill_gamma_555(struct drm_crtc *crtc, unsigned int i, u16 r, u16 g, u16 b,
+ drm_crtc_set_lut_func set_gamma)
+{
+ r = (r << 11) | (r << 6) | (r << 1) | (r >> 4);
+ g = (g << 11) | (g << 6) | (g << 1) | (g >> 4);
+ b = (b << 11) | (b << 6) | (b << 1) | (r >> 4);
+
+ set_gamma(crtc, i, r, g, b);
+}
+
+/**
+ * drm_crtc_fill_gamma_555 - Programs a default gamma ramp for RGB555-like formats
+ * @crtc: The displaying CRTC
+ * @set_gamma: Callback for programming the hardware gamma LUT
+ *
+ * Programs a default gamma ramp to hardware.
+ */
+void drm_crtc_fill_gamma_555(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma)
+{
+ unsigned int i;
+
+ for (i = 0; i < 32; ++i)
+ fill_gamma_555(crtc, i, i, i, i, set_gamma);
+}
+EXPORT_SYMBOL(drm_crtc_fill_gamma_555);
+
+/*
+ * Color-LUT programming
+ */
+
+/**
+ * drm_crtc_load_palette_8 - Programs palette for C8-like formats
+ * @crtc: The displaying CRTC
+ * @lut: The palette to program
+ * @set_palette: Callback for programming the hardware palette
+ *
+ * Programs the palette specified in @lut to hardware. The input palette
+ * must have 256 entries per color component.
+ */
+void drm_crtc_load_palette_8(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_palette)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ set_palette(crtc, i, lut[i].red, lut[i].green, lut[i].blue);
+}
+EXPORT_SYMBOL(drm_crtc_load_palette_8);
+
+static void fill_palette_8(struct drm_crtc *crtc, unsigned int i,
+ drm_crtc_set_lut_func set_palette)
+{
+ u16 Y = (i << 8) | i; // relative luminance
+
+ set_palette(crtc, i, Y, Y, Y);
+}
+
+/**
+ * drm_crtc_fill_palette_8 - Programs a default palette for C8-like formats
+ * @crtc: The displaying CRTC
+ * @set_palette: Callback for programming the hardware palette
+ *
+ * Programs a default palette to hardware.
+ */
+void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette)
+{
+ unsigned int i;
+
+ for (i = 0; i < 256; ++i)
+ fill_palette_8(crtc, i, set_palette);
+}
+EXPORT_SYMBOL(drm_crtc_fill_palette_8);
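A sketch of how a driver might wire the new LUT helpers into its CRTC code; my_crtc_set_gamma and my_crtc_load_lut are hypothetical names and the actual hardware write is left out:

static void my_crtc_set_gamma(struct drm_crtc *crtc, unsigned int index,
			      u16 red, u16 green, u16 blue)
{
	/* write one hardware gamma LUT entry at the given index */
}

static void my_crtc_load_lut(struct drm_crtc *crtc,
			     struct drm_crtc_state *crtc_state)
{
	if (crtc_state->gamma_lut)
		drm_crtc_load_gamma_888(crtc, crtc_state->gamma_lut->data,
					my_crtc_set_gamma);
	else
		drm_crtc_fill_gamma_888(crtc, my_crtc_set_gamma);
}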
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 48b08c9611a7..0167e0e0d425 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -33,6 +33,7 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_utils.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/uaccess.h>
@@ -279,6 +280,7 @@ static int drm_connector_init_only(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
+ mutex_init(&connector->cec.mutex);
mutex_init(&connector->eld_mutex);
mutex_init(&connector->edid_override_mutex);
mutex_init(&connector->hdmi.infoframes.lock);
@@ -702,6 +704,46 @@ static void drm_mode_remove(struct drm_connector *connector,
}
/**
+ * drm_connector_cec_phys_addr_invalidate - invalidate CEC physical address
+ * @connector: connector undergoing CEC operation
+ *
+ * Invalidates the CEC physical address set for this DRM connector.
+ */
+void drm_connector_cec_phys_addr_invalidate(struct drm_connector *connector)
+{
+ mutex_lock(&connector->cec.mutex);
+
+ if (connector->cec.funcs &&
+ connector->cec.funcs->phys_addr_invalidate)
+ connector->cec.funcs->phys_addr_invalidate(connector);
+
+ mutex_unlock(&connector->cec.mutex);
+}
+EXPORT_SYMBOL(drm_connector_cec_phys_addr_invalidate);
+
+/**
+ * drm_connector_cec_phys_addr_set - propagate CEC physical address
+ * @connector: connector undergoing CEC operation
+ *
+ * Propagate CEC physical address from the display_info to this DRM connector.
+ */
+void drm_connector_cec_phys_addr_set(struct drm_connector *connector)
+{
+ u16 addr;
+
+ mutex_lock(&connector->cec.mutex);
+
+ addr = connector->display_info.source_physical_address;
+
+ if (connector->cec.funcs &&
+ connector->cec.funcs->phys_addr_set)
+ connector->cec.funcs->phys_addr_set(connector, addr);
+
+ mutex_unlock(&connector->cec.mutex);
+}
+EXPORT_SYMBOL(drm_connector_cec_phys_addr_set);
+
+/**
* drm_connector_cleanup - cleans up an initialised connector
* @connector: connector to cleanup
*
diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
index 44a5a36806e3..6a49e7a0ab84 100644
--- a/drivers/gpu/drm/drm_damage_helper.c
+++ b/drivers/gpu/drm/drm_damage_helper.c
@@ -30,6 +30,8 @@
*
**************************************************************************/
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 3dfd8b34dceb..abceb28b23fc 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -77,14 +77,15 @@ static int drm_clients_info(struct seq_file *m, void *data)
kuid_t uid;
seq_printf(m,
- "%20s %5s %3s master a %5s %10s %*s\n",
+ "%20s %5s %3s master a %5s %10s %*s %20s\n",
"command",
"tgid",
"dev",
"uid",
"magic",
DRM_CLIENT_NAME_MAX_LEN,
- "name");
+ "name",
+ "id");
/* dev->filelist is sorted youngest first, but we want to present
* oldest first (i.e. kernel, servers, clients), so walk backwardss.
@@ -100,7 +101,7 @@ static int drm_clients_info(struct seq_file *m, void *data)
pid = rcu_dereference(priv->pid);
task = pid_task(pid, PIDTYPE_TGID);
uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
- seq_printf(m, "%20s %5d %3d %c %c %5d %10u %*s\n",
+ seq_printf(m, "%20s %5d %3d %c %c %5d %10u %*s %20llu\n",
task ? task->comm : "<unknown>",
pid_vnr(pid),
priv->minor->index,
@@ -109,7 +110,8 @@ static int drm_clients_info(struct seq_file *m, void *data)
from_kuid_munged(seq_user_ns(m), uid),
priv->magic,
DRM_CLIENT_NAME_MAX_LEN,
- priv->client_name ? priv->client_name : "<unset>");
+ priv->client_name ? priv->client_name : "<unset>",
+ priv->client_id);
rcu_read_unlock();
mutex_unlock(&priv->client_name_lock);
}
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index bbc3bc4ba844..6b43b1cf2327 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -29,6 +29,7 @@
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 56dd61f8e05a..02556363e918 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -29,11 +29,13 @@
#include <linux/bitops.h>
#include <linux/cgroup_dmem.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sprintf.h>
#include <linux/srcu.h>
@@ -538,10 +540,15 @@ static const char *drm_get_wedge_recovery(unsigned int opt)
}
}
+#define WEDGE_STR_LEN 32
+#define PID_STR_LEN 15
+#define COMM_STR_LEN (TASK_COMM_LEN + 5)
+
/**
* drm_dev_wedged_event - generate a device wedged uevent
* @dev: DRM device
* @method: method(s) to be used for recovery
+ * @info: optional information about the guilty task
*
* This generates a device wedged uevent for the DRM device specified by @dev.
* Recovery @method\(s) of choice will be sent in the uevent environment as
@@ -554,13 +561,13 @@ static const char *drm_get_wedge_recovery(unsigned int opt)
*
* Returns: 0 on success, negative error code otherwise.
*/
-int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
+int drm_dev_wedged_event(struct drm_device *dev, unsigned long method,
+ struct drm_wedge_task_info *info)
{
+ char event_string[WEDGE_STR_LEN], pid_string[PID_STR_LEN], comm_string[COMM_STR_LEN];
+ char *envp[] = { event_string, NULL, NULL, NULL };
const char *recovery = NULL;
unsigned int len, opt;
- /* Event string length up to 28+ characters with available methods */
- char event_string[32];
- char *envp[] = { event_string, NULL };
len = scnprintf(event_string, sizeof(event_string), "%s", "WEDGED=");
@@ -582,6 +589,13 @@ int drm_dev_wedged_event(struct drm_device *dev, unsigned long method)
drm_info(dev, "device wedged, %s\n", method == DRM_WEDGE_RECOVERY_NONE ?
"but recovered through reset" : "needs recovery");
+ if (info && (info->comm[0] != '\0') && (info->pid >= 0)) {
+ snprintf(pid_string, sizeof(pid_string), "PID=%u", info->pid);
+ snprintf(comm_string, sizeof(comm_string), "TASK=%s", info->comm);
+ envp[1] = pid_string;
+ envp[2] = comm_string;
+ }
+
return kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
}
EXPORT_SYMBOL(drm_dev_wedged_event);
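An illustrative call site for the extended interface, assuming the driver has already identified the guilty task; guilty_task and ddev are placeholder names and the recovery method is only an example:

struct drm_wedge_task_info info = { };

info.pid = task_pid_vnr(guilty_task);
strscpy(info.comm, guilty_task->comm, sizeof(info.comm));

/* device already recovered through reset, so no recovery method is requested */
drm_dev_wedged_event(ddev, DRM_WEDGE_RECOVERY_NONE, &info);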
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 9c8822b337e2..a876d7ef252b 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -31,6 +31,7 @@
#include <linux/bitfield.h>
#include <linux/byteorder/generic.h>
#include <linux/cec.h>
+#include <linux/export.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
diff --git a/drivers/gpu/drm/drm_exec.c b/drivers/gpu/drm/drm_exec.c
index 18e366cc4993..8d0601400182 100644
--- a/drivers/gpu/drm/drm_exec.c
+++ b/drivers/gpu/drm/drm_exec.c
@@ -2,7 +2,9 @@
#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
+
#include <linux/dma-resv.h>
+#include <linux/export.h>
/**
* DOC: Overview
diff --git a/drivers/gpu/drm/drm_fb_dma_helper.c b/drivers/gpu/drm/drm_fb_dma_helper.c
index 2c4dc7ebc0c3..fd71969d2fb1 100644
--- a/drivers/gpu/drm/drm_fb_dma_helper.c
+++ b/drivers/gpu/drm/drm_fb_dma_helper.c
@@ -17,7 +17,9 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panic.h>
#include <drm/drm_plane.h>
+
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/module.h>
/**
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 937c3939e502..11a5b60cb9ce 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -30,6 +30,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/console.h>
+#include <linux/export.h>
#include <linux/pci.h>
#include <linux/sysrq.h>
#include <linux/vga_switcheroo.h>
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index 02a516e77192..8bd626ef16c7 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/fb.h>
#include <linux/vmalloc.h>
diff --git a/drivers/gpu/drm/drm_fbdev_shmem.c b/drivers/gpu/drm/drm_fbdev_shmem.c
index f824369baacd..1e827bf8b815 100644
--- a/drivers/gpu/drm/drm_fbdev_shmem.c
+++ b/drivers/gpu/drm/drm_fbdev_shmem.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/fb.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/drm_fbdev_ttm.c b/drivers/gpu/drm/drm_fbdev_ttm.c
index 73d35d59590c..85feb55bba11 100644
--- a/drivers/gpu/drm/drm_fbdev_ttm.c
+++ b/drivers/gpu/drm/drm_fbdev_ttm.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: MIT
+#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 246cf845e2c9..3952e27447ee 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -33,6 +33,7 @@
#include <linux/anon_inodes.h>
#include <linux/dma-fence.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -1017,8 +1018,10 @@ void drm_file_err(struct drm_file *file_priv, const char *fmt, ...)
pid = rcu_dereference(file_priv->pid);
task = pid_task(pid, PIDTYPE_TGID);
- drm_err(dev, "comm: %s pid: %d client: %s ... %pV", task ? task->comm : "Unset",
- task ? task->pid : 0, file_priv->client_name ?: "Unset", &vaf);
+ drm_err(dev, "comm: %s pid: %d client-id:%llu client: %s ... %pV",
+ task ? task->comm : "Unset",
+ task ? task->pid : 0, file_priv->client_id,
+ file_priv->client_name ?: "Unset", &vaf);
va_end(args);
rcu_read_unlock();
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index 8c6090a90d56..f5889dd8e7aa 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -21,6 +21,7 @@
* SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <drm/drm_flip_work.h>
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index d36e6cacc575..99d9f7bbc261 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -8,6 +8,7 @@
* (at your option) any later version.
*/
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
@@ -857,11 +858,33 @@ static void drm_fb_xrgb8888_to_abgr8888_line(void *dbuf, const void *sbuf, unsig
drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_abgr8888);
}
-static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip,
- struct drm_format_conv_state *state)
+/**
+ * drm_fb_xrgb8888_to_abgr8888 - Convert XRGB8888 to ABGR8888 clip buffer
+ * @dst: Array of ABGR8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for ABGR8888 devices that don't support XRGB8888
+ * natively. It sets an opaque alpha channel as part of the conversion.
+ */
+void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
@@ -870,17 +893,40 @@ static void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned in
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_abgr8888_line);
}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_abgr8888);
static void drm_fb_xrgb8888_to_xbgr8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_xbgr8888);
}
-static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
- const struct iosys_map *src,
- const struct drm_framebuffer *fb,
- const struct drm_rect *clip,
- struct drm_format_conv_state *state)
+/**
+ * drm_fb_xrgb8888_to_xbgr8888 - Convert XRGB8888 to XBGR8888 clip buffer
+ * @dst: Array of XBGR8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for XBGR8888 devices that don't support XRGB8888
+ * natively.
+ */
+void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
{
static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
4,
@@ -889,6 +935,49 @@ static void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned in
drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
drm_fb_xrgb8888_to_xbgr8888_line);
}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_xbgr8888);
+
+static void drm_fb_xrgb8888_to_bgrx8888_line(void *dbuf, const void *sbuf, unsigned int pixels)
+{
+ drm_fb_xfrm_line_32to32(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_bgrx8888);
+}
+
+/**
+ * drm_fb_xrgb8888_to_bgrx8888 - Convert XRGB8888 to BGRX8888 clip buffer
+ * @dst: Array of BGRX8888 destination buffers
+ * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines
+ * within @dst; can be NULL if scanlines are stored next to each other.
+ * @src: Array of XRGB8888 source buffers
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @state: Transform and conversion state
+ *
+ * This function copies parts of a framebuffer to display memory and converts the
+ * color format during the process. The parameters @dst, @dst_pitch and @src refer
+ * to arrays. Each array must have at least as many entries as there are planes in
+ * @fb's format. Each entry stores the value for the format's respective color plane
+ * at the same index.
+ *
+ * This function does not apply clipping on @dst (i.e. the destination is at the
+ * top-left corner).
+ *
+ * Drivers can use this function for BGRX8888 devices that don't support XRGB8888
+ * natively.
+ */
+void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src,
+ const struct drm_framebuffer *fb,
+ const struct drm_rect *clip,
+ struct drm_format_conv_state *state)
+{
+ static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = {
+ 4,
+ };
+
+ drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state,
+ drm_fb_xrgb8888_to_bgrx8888_line);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_bgrx8888);
static void drm_fb_xrgb8888_to_xrgb2101010_line(void *dbuf, const void *sbuf, unsigned int pixels)
{
@@ -1250,141 +1339,3 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
}
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono);
-
-static uint32_t drm_fb_nonalpha_fourcc(uint32_t fourcc)
-{
- /* only handle formats with depth != 0 and alpha channel */
- switch (fourcc) {
- case DRM_FORMAT_ARGB1555:
- return DRM_FORMAT_XRGB1555;
- case DRM_FORMAT_ABGR1555:
- return DRM_FORMAT_XBGR1555;
- case DRM_FORMAT_RGBA5551:
- return DRM_FORMAT_RGBX5551;
- case DRM_FORMAT_BGRA5551:
- return DRM_FORMAT_BGRX5551;
- case DRM_FORMAT_ARGB8888:
- return DRM_FORMAT_XRGB8888;
- case DRM_FORMAT_ABGR8888:
- return DRM_FORMAT_XBGR8888;
- case DRM_FORMAT_RGBA8888:
- return DRM_FORMAT_RGBX8888;
- case DRM_FORMAT_BGRA8888:
- return DRM_FORMAT_BGRX8888;
- case DRM_FORMAT_ARGB2101010:
- return DRM_FORMAT_XRGB2101010;
- case DRM_FORMAT_ABGR2101010:
- return DRM_FORMAT_XBGR2101010;
- case DRM_FORMAT_RGBA1010102:
- return DRM_FORMAT_RGBX1010102;
- case DRM_FORMAT_BGRA1010102:
- return DRM_FORMAT_BGRX1010102;
- }
-
- return fourcc;
-}
-
-static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t fourcc)
-{
- const uint32_t *fourccs_end = fourccs + nfourccs;
-
- while (fourccs < fourccs_end) {
- if (*fourccs == fourcc)
- return true;
- ++fourccs;
- }
- return false;
-}
-
-/**
- * drm_fb_build_fourcc_list - Filters a list of supported color formats against
- * the device's native formats
- * @dev: DRM device
- * @native_fourccs: 4CC codes of natively supported color formats
- * @native_nfourccs: The number of entries in @native_fourccs
- * @fourccs_out: Returns 4CC codes of supported color formats
- * @nfourccs_out: The number of available entries in @fourccs_out
- *
- * This function create a list of supported color format from natively
- * supported formats and additional emulated formats.
- * At a minimum, most userspace programs expect at least support for
- * XRGB8888 on the primary plane. Devices that have to emulate the
- * format, and possibly others, can use drm_fb_build_fourcc_list() to
- * create a list of supported color formats. The returned list can
- * be handed over to drm_universal_plane_init() et al. Native formats
- * will go before emulated formats. Native formats with alpha channel
- * will be replaced by such without, as primary planes usually don't
- * support alpha. Other heuristics might be applied
- * to optimize the order. Formats near the beginning of the list are
- * usually preferred over formats near the end of the list.
- *
- * Returns:
- * The number of color-formats 4CC codes returned in @fourccs_out.
- */
-size_t drm_fb_build_fourcc_list(struct drm_device *dev,
- const u32 *native_fourccs, size_t native_nfourccs,
- u32 *fourccs_out, size_t nfourccs_out)
-{
- /*
- * XRGB8888 is the default fallback format for most of userspace
- * and it's currently the only format that should be emulated for
- * the primary plane. Only if there's ever another default fallback,
- * it should be added here.
- */
- static const uint32_t extra_fourccs[] = {
- DRM_FORMAT_XRGB8888,
- };
- static const size_t extra_nfourccs = ARRAY_SIZE(extra_fourccs);
-
- u32 *fourccs = fourccs_out;
- const u32 *fourccs_end = fourccs_out + nfourccs_out;
- size_t i;
-
- /*
- * The device's native formats go first.
- */
-
- for (i = 0; i < native_nfourccs; ++i) {
- /*
- * Several DTs, boot loaders and firmware report native
- * alpha formats that are non-alpha formats instead. So
- * replace alpha formats by non-alpha formats.
- */
- u32 fourcc = drm_fb_nonalpha_fourcc(native_fourccs[i]);
-
- if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
- continue; /* skip duplicate entries */
- } else if (fourccs == fourccs_end) {
- drm_warn(dev, "Ignoring native format %p4cc\n", &fourcc);
- continue; /* end of available output buffer */
- }
-
- drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc);
-
- *fourccs = fourcc;
- ++fourccs;
- }
-
- /*
- * The extra formats, emulated by the driver, go second.
- */
-
- for (i = 0; (i < extra_nfourccs) && (fourccs < fourccs_end); ++i) {
- u32 fourcc = extra_fourccs[i];
-
- if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
- continue; /* skip duplicate and native entries */
- } else if (fourccs == fourccs_end) {
- drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc);
- continue; /* end of available output buffer */
- }
-
- drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc);
-
- *fourccs = fourcc;
- ++fourccs;
- }
-
- return fourccs - fourccs_out;
-}
-EXPORT_SYMBOL(drm_fb_build_fourcc_list);
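The three helpers added above share the calling convention of the existing drm_fb_xrgb8888_to_*() conversion routines. As a hedged illustration only (every name except the new helper is hypothetical), a driver with a single-plane BGRX8888 scanout buffer might call the conversion from its damage-clip handling roughly like this:

/*
 * Minimal sketch, not part of the patch: convert one damage rectangle of
 * an XRGB8888 framebuffer into a BGRX8888 destination.
 */
static void example_flush_damage(struct iosys_map *dst, unsigned int dst_pitch,
				 const struct iosys_map *src,
				 const struct drm_framebuffer *fb,
				 const struct drm_rect *damage,
				 struct drm_format_conv_state *fmtcnv_state)
{
	const unsigned int dst_pitches[DRM_FORMAT_MAX_PLANES] = { dst_pitch };

	/* @dst, @dst_pitches and @src are one-entry arrays for this format */
	drm_fb_xrgb8888_to_bgrx8888(dst, dst_pitches, src, fb, damage, fmtcnv_state);
}

Passing NULL instead of a pitch array is also allowed when scanlines are stored back to back, as the kernel-doc above notes.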
diff --git a/drivers/gpu/drm/drm_format_internal.h b/drivers/gpu/drm/drm_format_internal.h
index 9f857bfa368d..9428d5cfebc5 100644
--- a/drivers/gpu/drm/drm_format_internal.h
+++ b/drivers/gpu/drm/drm_format_internal.h
@@ -42,7 +42,7 @@ static inline u32 drm_pixel_xrgb8888_to_r8_bt601(u32 pix)
u32 b = pix & 0x000000ff;
/* ITU-R BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
- return (3 * r + 6 * g + b) / 10;
+ return (77 * r + 150 * g + 29 * b) / 256;
}
static inline u32 drm_pixel_xrgb8888_to_rgb332(u32 pix)
@@ -111,6 +111,14 @@ static inline u32 drm_pixel_xrgb8888_to_xbgr8888(u32 pix)
((pix & 0x000000ff) << 16);
}
+static inline u32 drm_pixel_xrgb8888_to_bgrx8888(u32 pix)
+{
+ return ((pix & 0xff000000) >> 24) | /* also copy filler bits */
+ ((pix & 0x00ff0000) >> 8) |
+ ((pix & 0x0000ff00) << 8) |
+ ((pix & 0x000000ff) << 24);
+}
+
static inline u32 drm_pixel_xrgb8888_to_abgr8888(u32 pix)
{
return GENMASK(31, 24) | /* fill alpha bits */
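As a quick sanity check of the byte-lane shuffle in drm_pixel_xrgb8888_to_bgrx8888() (an illustration, not part of the patch):

u32 xrgb = 0xffff0000;	/* X=ff R=ff G=00 B=00, i.e. opaque red */
u32 bgrx = drm_pixel_xrgb8888_to_bgrx8888(xrgb);
/* bgrx == 0x0000ffff: B=00 G=00 R=ff X=ff -- red preserved, filler copied */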
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 3a94ca211f9c..55ddd99fd7f6 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -346,6 +346,33 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_P030, .depth = 0, .num_planes = 2,
.char_per_block = { 4, 8, 0 }, .block_w = { 3, 3, 0 }, .block_h = { 1, 1, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S010, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S210, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S410, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S012, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S212, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S412, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S016, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_S216, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 2, .vsub = 1, .is_yuv = true},
+ { .format = DRM_FORMAT_S416, .depth = 0, .num_planes = 3,
+ .char_per_block = { 2, 2, 2 }, .block_w = { 1, 1, 1 }, .block_h = { 1, 1, 1 },
+ .hsub = 1, .vsub = 1, .is_yuv = true},
};
unsigned int i;
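The new entries make the subsampled 10/12/16-bit YUV formats visible through the usual lookup path. A hedged sketch of querying one of them, assuming the matching DRM_FORMAT_S010 fourcc definition from this series is present in the UAPI header:

const struct drm_format_info *info = drm_format_info(DRM_FORMAT_S010);

/* three planes, 2 bytes per sample, chroma subsampled 2x2 */
if (info && info->is_yuv)
	pr_debug("planes=%d hsub=%d vsub=%d\n",
		 info->num_planes, info->hsub, info->vsub);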
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 1e659d2660f7..19d50d254fe6 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -26,6 +26,7 @@
*/
#include <linux/dma-buf.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/iosys-map.h>
@@ -1184,38 +1185,6 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
obj->funcs->print_info(p, indent, obj);
}
-int drm_gem_pin_locked(struct drm_gem_object *obj)
-{
- if (obj->funcs->pin)
- return obj->funcs->pin(obj);
-
- return 0;
-}
-
-void drm_gem_unpin_locked(struct drm_gem_object *obj)
-{
- if (obj->funcs->unpin)
- obj->funcs->unpin(obj);
-}
-
-int drm_gem_pin(struct drm_gem_object *obj)
-{
- int ret;
-
- dma_resv_lock(obj->resv, NULL);
- ret = drm_gem_pin_locked(obj);
- dma_resv_unlock(obj->resv);
-
- return ret;
-}
-
-void drm_gem_unpin(struct drm_gem_object *obj)
-{
- dma_resv_lock(obj->resv, NULL);
- drm_gem_unpin_locked(obj);
- dma_resv_unlock(obj->resv);
-}
-
int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
diff --git a/drivers/gpu/drm/drm_gem_atomic_helper.c b/drivers/gpu/drm/drm_gem_atomic_helper.c
index 93337543aac3..ebf305fb24f0 100644
--- a/drivers/gpu/drm/drm_gem_atomic_helper.c
+++ b/drivers/gpu/drm/drm_gem_atomic_helper.c
@@ -2,6 +2,7 @@
#include <linux/dma-resv.h>
#include <linux/dma-fence-chain.h>
+#include <linux/export.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_atomic_uapi.h>
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 6f72e7a0f427..618ce725cd75 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -5,6 +5,7 @@
* Copyright (C) 2017 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -607,8 +608,11 @@ int drm_gem_fb_afbc_init(struct drm_device *dev,
if (ret < 0)
return ret;
- if (objs[0]->size < afbc_fb->afbc_size)
+ if (objs[0]->size < afbc_fb->afbc_size) {
+ drm_dbg_kms(dev, "GEM object size (%zu) smaller than minimum afbc size (%u)\n",
+ objs[0]->size, afbc_fb->afbc_size);
return -EINVAL;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index aa43265f4f4f..8ac0b1fa5287 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -348,6 +348,8 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
+ dma_resv_assert_held(obj->resv);
+
if (drm_gem_is_imported(obj)) {
ret = dma_buf_vmap(obj->dma_buf, map);
} else {
@@ -408,6 +410,8 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
{
struct drm_gem_object *obj = &shmem->base;
+ dma_resv_assert_held(obj->resv);
+
if (drm_gem_is_imported(obj)) {
dma_buf_vunmap(obj->dma_buf, map);
} else {
@@ -800,6 +804,63 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
+/**
+ * drm_gem_shmem_prime_import_no_map - Import dmabuf without mapping its sg_table
+ * @dev: Device to import into
+ * @dma_buf: dma-buf object to import
+ *
+ * Drivers that use the shmem helpers but also want to import dmabuf without
+ * mapping its sg_table can use this as their &drm_driver.gem_prime_import
+ * implementation.
+ */
+struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct dma_buf_attachment *attach;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ size_t size;
+ int ret;
+
+ if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
+ obj = dma_buf->priv;
+ drm_gem_object_get(obj);
+ return obj;
+ }
+
+ attach = dma_buf_attach(dma_buf, dev->dev);
+ if (IS_ERR(attach))
+ return ERR_CAST(attach);
+
+ get_dma_buf(dma_buf);
+
+ size = PAGE_ALIGN(attach->dmabuf->size);
+
+ shmem = __drm_gem_shmem_create(dev, size, true, NULL);
+ if (IS_ERR(shmem)) {
+ ret = PTR_ERR(shmem);
+ goto fail_detach;
+ }
+
+ drm_dbg_prime(dev, "size = %zu\n", size);
+
+ shmem->base.import_attach = attach;
+ shmem->base.resv = dma_buf->resv;
+
+ return &shmem->base;
+
+fail_detach:
+ dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_no_map);
+
MODULE_DESCRIPTION("DRM SHMEM memory-management helpers");
MODULE_IMPORT_NS("DMA_BUF");
MODULE_LICENSE("GPL v2");
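For drivers that want the behaviour documented above, wiring the helper into &drm_driver is the whole integration. A hedged sketch (the driver name and elided fields are hypothetical):

static const struct drm_driver example_driver = {
	/* ...fops, name, GEM object callbacks, etc. elided... */
	.gem_prime_import = drm_gem_shmem_prime_import_no_map,
};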
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index 3734aa2d1c5b..257cca4cb97a 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/export.h>
#include <linux/module.h>
#include <drm/drm_gem_ttm_helper.h>
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 22b1fe9c03b8..b04cde4a60e7 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/export.h>
#include <linux/iosys-map.h>
#include <linux/module.h>
@@ -88,11 +89,6 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
* drmm_vram_helper_init() is a managed interface that installs a
* clean-up handler to run during the DRM device's release.
*
- * For drawing or scanout operations, rsp. buffer objects have to be pinned
- * in video RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
- * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
- * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
- *
* A buffer object that is pinned in video RAM has a fixed address within that
* memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
* it's used to program the hardware's scanout engine for framebuffers, set
@@ -299,30 +295,7 @@ out:
return 0;
}
-/**
- * drm_gem_vram_pin() - Pins a GEM VRAM object in a region.
- * @gbo: the GEM VRAM object
- * @pl_flag: a bitmask of possible memory regions
- *
- * Pinning a buffer object ensures that it is not evicted from
- * a memory region. A pinned buffer object has to be unpinned before
- * it can be pinned to another region. If the pl_flag argument is 0,
- * the buffer is pinned at its current location (video RAM or system
- * memory).
- *
- * Small buffer objects, such as cursor images, can lead to memory
- * fragmentation if they are pinned in the middle of video RAM. This
- * is especially a problem on devices with only a small amount of
- * video RAM. Fragmentation can prevent the primary framebuffer from
- * fitting in, even though there's enough memory overall. The modifier
- * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned
- * at the high end of the memory region to avoid fragmentation.
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
+static int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
{
int ret;
@@ -334,7 +307,6 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag)
return ret;
}
-EXPORT_SYMBOL(drm_gem_vram_pin);
static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
{
@@ -343,15 +315,7 @@ static void drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo)
ttm_bo_unpin(&gbo->bo);
}
-/**
- * drm_gem_vram_unpin() - Unpins a GEM VRAM object
- * @gbo: the GEM VRAM object
- *
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
- */
-int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
+static int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
{
int ret;
@@ -364,7 +328,6 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo)
return 0;
}
-EXPORT_SYMBOL(drm_gem_vram_unpin);
/**
* drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address
@@ -690,41 +653,6 @@ EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb);
*/
/**
- * drm_gem_vram_object_pin() - Implements &struct drm_gem_object_funcs.pin
- * @gem: The GEM object to pin
- *
- * Returns:
- * 0 on success, or
- * a negative errno code otherwise.
- */
-static int drm_gem_vram_object_pin(struct drm_gem_object *gem)
-{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
-
- /*
- * Fbdev console emulation is the use case of these PRIME
- * helpers. This may involve updating a hardware buffer from
- * a shadow FB. We pin the buffer to it's current location
- * (either video RAM or system memory) to prevent it from
- * being relocated during the update operation. If you require
- * the buffer to be pinned to VRAM, implement a callback that
- * sets the flags accordingly.
- */
- return drm_gem_vram_pin_locked(gbo, 0);
-}
-
-/**
- * drm_gem_vram_object_unpin() - Implements &struct drm_gem_object_funcs.unpin
- * @gem: The GEM object to unpin
- */
-static void drm_gem_vram_object_unpin(struct drm_gem_object *gem)
-{
- struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem);
-
- drm_gem_vram_unpin_locked(gbo);
-}
-
-/**
* drm_gem_vram_object_vmap() -
* Implements &struct drm_gem_object_funcs.vmap
* @gem: The GEM object to map
@@ -762,8 +690,6 @@ static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem,
static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
.free = drm_gem_vram_object_free,
- .pin = drm_gem_vram_object_pin,
- .unpin = drm_gem_vram_object_unpin,
.vmap = drm_gem_vram_object_vmap,
.vunmap = drm_gem_vram_object_vunmap,
.mmap = drm_gem_ttm_mmap,
diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 735bfdf4322f..ab198645d90f 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -7,6 +7,7 @@
*/
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/hmm.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
@@ -981,6 +982,40 @@ static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm)
#endif
/**
+ * drm_gpusvm_find_vma_start() - Find start address for first VMA in range
+ * @gpusvm: Pointer to the GPU SVM structure
+ * @start: The inclusive start user address.
+ * @end: The exclusive end user address.
+ *
+ * Returns: The start address of the first VMA within the provided range,
+ * or ULONG_MAX otherwise. Assumes @start < @end.
+ */
+unsigned long
+drm_gpusvm_find_vma_start(struct drm_gpusvm *gpusvm,
+ unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = gpusvm->mm;
+ struct vm_area_struct *vma;
+ unsigned long addr = ULONG_MAX;
+
+ if (!mmget_not_zero(mm))
+ return addr;
+
+ mmap_read_lock(mm);
+
+ vma = find_vma_intersection(mm, start, end);
+ if (vma)
+ addr = vma->vm_start;
+
+ mmap_read_unlock(mm);
+ mmput(mm);
+
+ return addr;
+}
+EXPORT_SYMBOL_GPL(drm_gpusvm_find_vma_start);
+
+/**
* drm_gpusvm_range_find_or_insert() - Find or insert GPU SVM range
* @gpusvm: Pointer to the GPU SVM structure
* @fault_addr: Fault address
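A hedged sketch of how a driver might use drm_gpusvm_find_vma_start() to skip an unmapped hole at the front of a prefetch range (all names other than the new helper are hypothetical):

static int example_clamp_prefetch(struct drm_gpusvm *gpusvm,
				  unsigned long *start, unsigned long end)
{
	unsigned long addr = drm_gpusvm_find_vma_start(gpusvm, *start, end);

	if (addr == ULONG_MAX || addr >= end)
		return -ENOENT;	/* no VMA intersects [*start, end) */

	*start = addr;		/* begin faulting at the first backed VMA */
	return 0;
}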
diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index f9eb56f24bef..e89b932e987c 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -27,6 +27,7 @@
#include <drm/drm_gpuvm.h>
+#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/mm.h>
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index e44f28fd81d3..442eb31351dd 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -175,10 +175,6 @@ void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj);
-int drm_gem_pin_locked(struct drm_gem_object *obj);
-void drm_gem_unpin_locked(struct drm_gem_object *obj);
-int drm_gem_pin(struct drm_gem_object *obj);
-void drm_gem_unpin(struct drm_gem_object *obj);
int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map);
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
index cc4c463daae7..247f468731de 100644
--- a/drivers/gpu/drm/drm_managed.c
+++ b/drivers/gpu/drm/drm_managed.c
@@ -7,6 +7,7 @@
#include <drm/drm_managed.h>
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index a4cd476f9b30..ba4be6be5d28 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -8,6 +8,7 @@
#include <linux/backlight.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index e5184a0c2465..a00d76443128 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -26,6 +26,7 @@
*/
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index b4239fd04e9d..25f376869b3a 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/uaccess.h>
#include <drm/drm_drv.h>
diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c
index 5565464c1734..ef32f6af10d4 100644
--- a/drivers/gpu/drm/drm_modeset_helper.c
+++ b/drivers/gpu/drm/drm_modeset_helper.c
@@ -20,6 +20,8 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_client_event.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 7694b85e75e3..beb91a13a312 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -21,6 +21,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
+
#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index 650de4da0853..c8bb28dccdc1 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -23,6 +23,7 @@
#include <linux/backlight.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -473,13 +474,51 @@ int of_drm_get_panel_orientation(const struct device_node *np,
EXPORT_SYMBOL(of_drm_get_panel_orientation);
#endif
+/* Find panel by fwnode. This should be identical to of_drm_find_panel(). */
+static struct drm_panel *find_panel_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ struct drm_panel *panel;
+
+ if (!fwnode_device_is_available(fwnode))
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&panel_lock);
+
+ list_for_each_entry(panel, &panel_list, list) {
+ if (dev_fwnode(panel->dev) == fwnode) {
+ mutex_unlock(&panel_lock);
+ return panel;
+ }
+ }
+
+ mutex_unlock(&panel_lock);
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+/* Find panel by follower device */
+static struct drm_panel *find_panel_by_dev(struct device *follower_dev)
+{
+ struct fwnode_handle *fwnode;
+ struct drm_panel *panel;
+
+ fwnode = fwnode_find_reference(dev_fwnode(follower_dev), "panel", 0);
+ if (IS_ERR(fwnode))
+ return ERR_PTR(-ENODEV);
+
+ panel = find_panel_by_fwnode(fwnode);
+ fwnode_handle_put(fwnode);
+
+ return panel;
+}
+
/**
* drm_is_panel_follower() - Check if the device is a panel follower
* @dev: The 'struct device' to check
*
* This checks to see if a device needs to be power sequenced together with
* a panel using the panel follower API.
- * At the moment panels can only be followed on device tree enabled systems.
+ *
* The "panel" property of the follower points to the panel to be followed.
*
* Return: true if we should be power sequenced with a panel; false otherwise.
@@ -491,7 +530,7 @@ bool drm_is_panel_follower(struct device *dev)
* don't bother trying to parse it here. We just need to know if the
* property is there.
*/
- return of_property_present(dev->of_node, "panel");
+ return device_property_present(dev, "panel");
}
EXPORT_SYMBOL(drm_is_panel_follower);
@@ -508,7 +547,6 @@ EXPORT_SYMBOL(drm_is_panel_follower);
* If a follower is added to a panel that's already been turned on, the
* follower's prepare callback is called right away.
*
- * At the moment panels can only be followed on device tree enabled systems.
* The "panel" property of the follower points to the panel to be followed.
*
* Return: 0 or an error code. Note that -ENODEV means that we detected that
@@ -518,16 +556,10 @@ EXPORT_SYMBOL(drm_is_panel_follower);
int drm_panel_add_follower(struct device *follower_dev,
struct drm_panel_follower *follower)
{
- struct device_node *panel_np;
struct drm_panel *panel;
int ret;
- panel_np = of_parse_phandle(follower_dev->of_node, "panel", 0);
- if (!panel_np)
- return -ENODEV;
-
- panel = of_drm_find_panel(panel_np);
- of_node_put(panel_np);
+ panel = find_panel_by_dev(follower_dev);
if (IS_ERR(panel))
return PTR_ERR(panel);
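With the fwnode-based lookup above, a follower only needs a "panel" device property (device tree or ACPI/software node) plus the existing follower API. A hedged sketch, assuming a follower embedded in some driver data:

static int example_panel_prepared(struct drm_panel_follower *follower)
{
	/* power up the follower device here */
	return 0;
}

static int example_panel_unpreparing(struct drm_panel_follower *follower)
{
	/* power down the follower device here */
	return 0;
}

static const struct drm_panel_follower_funcs example_follower_funcs = {
	.panel_prepared = example_panel_prepared,
	.panel_unpreparing = example_panel_unpreparing,
};

/* in probe(), with follower.funcs = &example_follower_funcs: */
/* ret = drm_panel_add_follower(dev, &follower); -- may return -EPROBE_DEFER */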
diff --git a/drivers/gpu/drm/drm_panel_backlight_quirks.c b/drivers/gpu/drm/drm_panel_backlight_quirks.c
index c477d98ade2b..598f812b7cb3 100644
--- a/drivers/gpu/drm/drm_panel_backlight_quirks.c
+++ b/drivers/gpu/drm/drm_panel_backlight_quirks.c
@@ -2,6 +2,7 @@
#include <linux/array_size.h>
#include <linux/dmi.h>
+#include <linux/export.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <drm/drm_edid.h>
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 7ac0fd5391fe..3a218fb592ce 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -9,6 +9,7 @@
*/
#include <linux/dmi.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <drm/drm_connector.h>
#include <drm/drm_utils.h>
diff --git a/drivers/gpu/drm/drm_panic.c b/drivers/gpu/drm/drm_panic.c
index b4de79583805..1d6312fa1429 100644
--- a/drivers/gpu/drm/drm_panic.c
+++ b/drivers/gpu/drm/drm_panic.c
@@ -6,6 +6,7 @@
* Tux Ascii art taken from cowsay written by Tony Monroe
*/
+#include <linux/export.h>
#include <linux/font.h>
#include <linux/highmem.h>
#include <linux/init.h>
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index c585f1e8803e..cb0f68d7f8ea 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -23,7 +23,6 @@
*/
#include <linux/dma-mapping.h>
-#include <linux/export.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 04992dfd4c79..38f82391bfda 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 7982be4b0306..747d248aaf02 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -23,6 +23,7 @@
* SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/list.h>
#include <drm/drm_atomic.h>
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index d828502268b8..b703f83874e1 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -599,6 +599,7 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
+ int ret;
/*
* drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers
@@ -608,7 +609,16 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
!obj->funcs->get_sg_table)
return -ENOSYS;
- return drm_gem_pin(obj);
+ if (!obj->funcs->pin)
+ return 0;
+
+ ret = dma_resv_lock(obj->resv, NULL);
+ if (ret)
+ return ret;
+ ret = obj->funcs->pin(obj);
+ dma_resv_unlock(obj->resv);
+
+ return ret;
}
EXPORT_SYMBOL(drm_gem_map_attach);
@@ -625,8 +635,16 @@ void drm_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach)
{
struct drm_gem_object *obj = dma_buf->priv;
+ int ret;
+
+ if (!obj->funcs->unpin)
+ return;
- drm_gem_unpin(obj);
+ ret = dma_resv_lock(obj->resv, NULL);
+ if (drm_WARN_ON(obj->dev, ret))
+ return;
+ obj->funcs->unpin(obj);
+ dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_map_detach);
@@ -910,6 +928,26 @@ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
}
EXPORT_SYMBOL(drm_gem_prime_export);
+
+/**
+ * drm_gem_is_prime_exported_dma_buf -
+ * checks if the DMA-BUF was exported from a GEM object belonging to @dev.
+ * @dev: drm_device to check against
+ * @dma_buf: dma-buf object to import
+ *
+ * Return: true if the DMA-BUF was exported from a GEM object belonging
+ * to @dev, false otherwise.
+ */
+
+bool drm_gem_is_prime_exported_dma_buf(struct drm_device *dev,
+ struct dma_buf *dma_buf)
+{
+ struct drm_gem_object *obj = dma_buf->priv;
+
+ return (dma_buf->ops == &drm_gem_prime_dmabuf_ops) && (obj->dev == dev);
+}
+EXPORT_SYMBOL(drm_gem_is_prime_exported_dma_buf);
+
/**
* drm_gem_prime_import_dev - core implementation of the import callback
* @dev: drm_device to import into
@@ -933,16 +971,14 @@ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct drm_gem_object *obj;
int ret;
- if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
+ if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
+ /*
+ * Importing dmabuf exported from our own gem increases
+ * refcount on gem itself instead of f_count of dmabuf.
+ */
obj = dma_buf->priv;
- if (obj->dev == dev) {
- /*
- * Importing dmabuf exported from our own gem increases
- * refcount on gem itself instead of f_count of dmabuf.
- */
- drm_gem_object_get(obj);
- return obj;
- }
+ drm_gem_object_get(obj);
+ return obj;
}
if (!dev->driver->gem_prime_import_sg_table)
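Drivers with their own &drm_driver.gem_prime_import hook can reuse the new check for the self-import fast path, mirroring what drm_gem_prime_import_dev() and the shmem helper above do. A hedged sketch:

static struct drm_gem_object *
example_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
	if (drm_gem_is_prime_exported_dma_buf(dev, dma_buf)) {
		struct drm_gem_object *obj = dma_buf->priv;

		/* self-import: take a GEM reference instead of a dma-buf one */
		drm_gem_object_get(obj);
		return obj;
	}

	/* driver-specific import of foreign buffers elided in this sketch */
	return ERR_PTR(-EOPNOTSUPP);
}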
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 79517bd4418f..ded9461df5f2 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -25,6 +25,7 @@
#include <linux/debugfs.h>
#include <linux/dynamic_debug.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <linux/seq_file.h>
diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c
index 6cc39e30781f..8959f7084e0b 100644
--- a/drivers/gpu/drm/drm_privacy_screen.c
+++ b/drivers/gpu/drm/drm_privacy_screen.c
@@ -7,6 +7,7 @@
*/
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
diff --git a/drivers/gpu/drm/drm_self_refresh_helper.c b/drivers/gpu/drm/drm_self_refresh_helper.c
index dd33fec5aabd..c0948586b7fd 100644
--- a/drivers/gpu/drm/drm_self_refresh_helper.c
+++ b/drivers/gpu/drm/drm_self_refresh_helper.c
@@ -7,6 +7,7 @@
*/
#include <linux/average.h>
#include <linux/bitops.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 250819fbc5ce..fcbcaaa36b5f 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -3,6 +3,7 @@
* Copyright (C) 2016 Noralf Trønnes
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/drm_suballoc.c b/drivers/gpu/drm/drm_suballoc.c
index 38cc7a123819..879ea33dbbc4 100644
--- a/drivers/gpu/drm/drm_suballoc.c
+++ b/drivers/gpu/drm/drm_suballoc.c
@@ -42,6 +42,8 @@
#include <drm/drm_suballoc.h>
#include <drm/drm_print.h>
+
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 636cd83ca29e..e1b0fa4000cd 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -195,6 +195,7 @@
#include <linux/anon_inodes.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/eventfd.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
diff --git a/drivers/gpu/drm/drm_vblank_work.c b/drivers/gpu/drm/drm_vblank_work.c
index 9cc71120246f..e4e1873f0e1e 100644
--- a/drivers/gpu/drm/drm_vblank_work.c
+++ b/drivers/gpu/drm/drm_vblank_work.c
@@ -2,6 +2,8 @@
#include <uapi/linux/sched/types.h>
+#include <linux/export.h>
+
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 83229a031af0..58659c16874c 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -23,6 +23,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/rbtree.h>
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index edbeab88ff2b..ace8c98c3e04 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -10,6 +10,7 @@
*/
#include <linux/dma-fence.h>
+#include <linux/export.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 3c0a5c3e0e3d..76c742328edb 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -534,7 +534,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
ret = drm_sched_job_init(&submit->sched_job,
&ctx->sched_entity[args->pipe],
- 1, submit->ctx);
+ 1, submit->ctx, file->client_id);
if (ret)
goto err_submit_put;
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm.h b/drivers/gpu/drm/hyperv/hyperv_drm.h
index d2d8582b36df..9e776112c03e 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm.h
+++ b/drivers/gpu/drm/hyperv/hyperv_drm.h
@@ -11,7 +11,9 @@
struct hyperv_drm_device {
/* drm */
struct drm_device dev;
- struct drm_simple_display_pipe pipe;
+ struct drm_plane plane;
+ struct drm_crtc crtc;
+ struct drm_encoder encoder;
struct drm_connector connector;
/* mode */
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
index 6c6b57298797..945b9482bcb3 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_modeset.c
@@ -5,6 +5,8 @@
#include <linux/hyperv.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
@@ -15,7 +17,8 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_panic.h>
+#include <drm/drm_plane.h>
#include "hyperv_drm.h"
@@ -38,18 +41,6 @@ static int hyperv_blit_to_vram_rect(struct drm_framebuffer *fb,
return 0;
}
-static int hyperv_blit_to_vram_fullscreen(struct drm_framebuffer *fb,
- const struct iosys_map *map)
-{
- struct drm_rect fullscreen = {
- .x1 = 0,
- .x2 = fb->width,
- .y1 = 0,
- .y2 = fb->height,
- };
- return hyperv_blit_to_vram_rect(fb, map, &fullscreen);
-}
-
static int hyperv_connector_get_modes(struct drm_connector *connector)
{
struct hyperv_drm_device *hv = to_hv(connector->dev);
@@ -98,30 +89,66 @@ static int hyperv_check_size(struct hyperv_drm_device *hv, int w, int h,
return 0;
}
-static void hyperv_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+static const uint32_t hyperv_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const uint64_t hyperv_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static void hyperv_crtc_helper_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
{
- struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct hyperv_drm_device *hv = to_hv(crtc->dev);
+ struct drm_plane *plane = &hv->plane;
+ struct drm_plane_state *plane_state = plane->state;
+ struct drm_crtc_state *crtc_state = crtc->state;
hyperv_hide_hw_ptr(hv->hdev);
hyperv_update_situation(hv->hdev, 1, hv->screen_depth,
crtc_state->mode.hdisplay,
crtc_state->mode.vdisplay,
plane_state->fb->pitches[0]);
- hyperv_blit_to_vram_fullscreen(plane_state->fb, &shadow_plane_state->data[0]);
}
-static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state)
+static const struct drm_crtc_helper_funcs hyperv_crtc_helper_funcs = {
+ .atomic_check = drm_crtc_helper_atomic_check,
+ .atomic_enable = hyperv_crtc_helper_atomic_enable,
+};
+
+static const struct drm_crtc_funcs hyperv_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int hyperv_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_crtc_state *crtc_state = NULL;
+ int ret;
- if (fb->format->format != DRM_FORMAT_XRGB8888)
- return -EINVAL;
+ if (crtc)
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+
+ ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ if (ret)
+ return ret;
+
+ if (!plane_state->visible)
+ return 0;
if (fb->pitches[0] * fb->height > hv->fb_size) {
drm_err(&hv->dev, "fb size requested by %s for %dX%d (pitch %d) greater than %ld\n",
@@ -132,53 +159,120 @@ static int hyperv_pipe_check(struct drm_simple_display_pipe *pipe,
return 0;
}
-static void hyperv_pipe_update(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *old_state)
+static void hyperv_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
- struct hyperv_drm_device *hv = to_hv(pipe->crtc.dev);
- struct drm_plane_state *state = pipe->plane.state;
- struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
- struct drm_rect rect;
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
+ struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state);
+ struct drm_rect damage;
+ struct drm_rect dst_clip;
+ struct drm_atomic_helper_damage_iter iter;
+
+ drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
+ drm_atomic_for_each_plane_damage(&iter, &damage) {
+ dst_clip = new_state->dst;
+
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+ hyperv_blit_to_vram_rect(new_state->fb, &shadow_plane_state->data[0], &damage);
+ hyperv_update_dirt(hv->hdev, &damage);
+ }
+}
- if (drm_atomic_helper_damage_merged(old_state, state, &rect)) {
- hyperv_blit_to_vram_rect(state->fb, &shadow_plane_state->data[0], &rect);
- hyperv_update_dirt(hv->hdev, &rect);
+static int hyperv_plane_get_scanout_buffer(struct drm_plane *plane,
+ struct drm_scanout_buffer *sb)
+{
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR_IOMEM(hv->vram);
+
+ if (plane->state && plane->state->fb) {
+ sb->format = plane->state->fb->format;
+ sb->width = plane->state->fb->width;
+ sb->height = plane->state->fb->height;
+ sb->pitch[0] = plane->state->fb->pitches[0];
+ sb->map[0] = map;
+ return 0;
}
+ return -ENODEV;
}
-static const struct drm_simple_display_pipe_funcs hyperv_pipe_funcs = {
- .enable = hyperv_pipe_enable,
- .check = hyperv_pipe_check,
- .update = hyperv_pipe_update,
- DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+static void hyperv_plane_panic_flush(struct drm_plane *plane)
+{
+ struct hyperv_drm_device *hv = to_hv(plane->dev);
+ struct drm_rect rect;
+
+ if (!plane->state || !plane->state->fb)
+ return;
+
+ rect.x1 = 0;
+ rect.y1 = 0;
+ rect.x2 = plane->state->fb->width;
+ rect.y2 = plane->state->fb->height;
+
+ hyperv_update_dirt(hv->hdev, &rect);
+}
+
+static const struct drm_plane_helper_funcs hyperv_plane_helper_funcs = {
+ DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
+ .atomic_check = hyperv_plane_atomic_check,
+ .atomic_update = hyperv_plane_atomic_update,
+ .get_scanout_buffer = hyperv_plane_get_scanout_buffer,
+ .panic_flush = hyperv_plane_panic_flush,
};
-static const uint32_t hyperv_formats[] = {
- DRM_FORMAT_XRGB8888,
+static const struct drm_plane_funcs hyperv_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static const uint64_t hyperv_modifiers[] = {
- DRM_FORMAT_MOD_LINEAR,
- DRM_FORMAT_MOD_INVALID
+static const struct drm_encoder_funcs hyperv_drm_simple_encoder_funcs_cleanup = {
+ .destroy = drm_encoder_cleanup,
};
static inline int hyperv_pipe_init(struct hyperv_drm_device *hv)
{
+ struct drm_device *dev = &hv->dev;
+ struct drm_encoder *encoder = &hv->encoder;
+ struct drm_plane *plane = &hv->plane;
+ struct drm_crtc *crtc = &hv->crtc;
+ struct drm_connector *connector = &hv->connector;
int ret;
- ret = drm_simple_display_pipe_init(&hv->dev,
- &hv->pipe,
- &hyperv_pipe_funcs,
- hyperv_formats,
- ARRAY_SIZE(hyperv_formats),
- hyperv_modifiers,
- &hv->connector);
+ ret = drm_universal_plane_init(dev, plane, 0,
+ &hyperv_plane_funcs,
+ hyperv_formats, ARRAY_SIZE(hyperv_formats),
+ hyperv_modifiers,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ret;
+ drm_plane_helper_add(plane, &hyperv_plane_helper_funcs);
+ drm_plane_enable_fb_damage_clips(plane);
- drm_plane_enable_fb_damage_clips(&hv->pipe.plane);
+ ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
+ &hyperv_crtc_funcs, NULL);
+ if (ret)
+ return ret;
+ drm_crtc_helper_add(crtc, &hyperv_crtc_helper_funcs);
- return 0;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+ ret = drm_encoder_init(dev, encoder,
+ &hyperv_drm_simple_encoder_funcs_cleanup,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret)
+ return ret;
+
+ ret = hyperv_conn_init(hv);
+ if (ret) {
+ drm_err(dev, "Failed to initialize connector.\n");
+ return ret;
+ }
+
+ return drm_connector_attach_encoder(connector, encoder);
}
static enum drm_mode_status
@@ -221,12 +315,6 @@ int hyperv_mode_config_init(struct hyperv_drm_device *hv)
dev->mode_config.funcs = &hyperv_mode_config_funcs;
- ret = hyperv_conn_init(hv);
- if (ret) {
- drm_err(dev, "Failed to initialized connector.\n");
- return ret;
- }
-
ret = hyperv_pipe_init(hv);
if (ret) {
drm_err(dev, "Failed to initialized pipe.\n");
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index a825dbe0a46e..8d9cb73a93a7 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -1277,6 +1277,8 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+ intel_panel_prepare(crtc_state, conn_state);
+
intel_crtc_vblank_on(crtc_state);
}
@@ -1410,6 +1412,8 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ intel_panel_unprepare(old_conn_state);
+
/* step1: turn off backlight */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_backlight_disable(old_conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 9a61c972dce9..2867d76d1a5e 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -32,7 +32,6 @@
#include "i915_drv.h"
#include "i915_utils.h"
-#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_display_core.h"
#include "intel_display_debugfs.h"
@@ -153,36 +152,36 @@ void intel_connector_destroy(struct drm_connector *connector)
kfree(connector);
}
-int intel_connector_register(struct drm_connector *connector)
+int intel_connector_register(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_connector *connector = to_intel_connector(_connector);
+ struct drm_i915_private *i915 = to_i915(_connector->dev);
int ret;
- ret = intel_backlight_device_register(intel_connector);
+ ret = intel_panel_register(connector);
if (ret)
goto err;
if (i915_inject_probe_failure(i915)) {
ret = -EFAULT;
- goto err_backlight;
+ goto err_panel;
}
- intel_connector_debugfs_add(intel_connector);
+ intel_connector_debugfs_add(connector);
return 0;
-err_backlight:
- intel_backlight_device_unregister(intel_connector);
+err_panel:
+ intel_panel_unregister(connector);
err:
return ret;
}
-void intel_connector_unregister(struct drm_connector *connector)
+void intel_connector_unregister(struct drm_connector *_connector)
{
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = to_intel_connector(_connector);
- intel_backlight_device_unregister(intel_connector);
+ intel_panel_unregister(connector);
}
void intel_connector_attach_encoder(struct intel_connector *connector,
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index ed4d743fc7c5..30c7315fc25e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -37,6 +37,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_panel.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank_work.h>
#include <drm/intel/i915_hdcp_interface.h>
@@ -384,6 +385,9 @@ struct intel_vbt_panel_data {
};
struct intel_panel {
+ /* Simple drm_panel */
+ struct drm_panel *base;
+
/* Fixed EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
const struct drm_edid *fixed_edid;
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index f5c972880391..f956919dc648 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -462,3 +462,134 @@ void intel_panel_fini(struct intel_connector *connector)
drm_mode_destroy(connector->base.dev, fixed_mode);
}
}
+
+/*
+ * If the panel was already enabled at probe, and we took over the state, the
+ * panel prepared state is out of sync, and the panel followers won't be
+ * notified. We need to call drm_panel_prepare() on enabled panels.
+ *
+ * It would be natural to handle this e.g. in the connector ->sync_state hook at
+ * intel_modeset_readout_hw_state(), but that's unfortunately too early. We
+ * don't have drm_connector::kdev at that time. For now, figure out the state at
+ * ->late_register, and sync there.
+ */
+static void intel_panel_sync_state(struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(connector);
+ struct drm_connector_state *conn_state;
+ struct intel_crtc *crtc;
+ int ret;
+
+ ret = drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL);
+ if (ret)
+ return;
+
+ conn_state = connector->base.state;
+
+ crtc = to_intel_crtc(conn_state->crtc);
+ if (crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active) {
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Panel prepare\n",
+ connector->base.base.id, connector->base.name);
+ intel_panel_prepare(crtc_state, conn_state);
+ }
+ }
+
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
+}
+
+static const struct drm_panel_funcs dummy_panel_funcs = {
+};
+
+int intel_panel_register(struct intel_connector *connector)
+{
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+
+ ret = intel_backlight_device_register(connector);
+ if (ret)
+ return ret;
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
+ struct device *dev = connector->base.kdev;
+ struct drm_panel *base;
+
+ /* Sanity check. */
+ if (drm_WARN_ON(display->drm, !dev))
+ goto out;
+
+ /*
+ * We need drm_connector::kdev for allocating the panel, to make
+ * drm_panel_add_follower() lookups work. The kdev is
+ * initialized in drm_sysfs_connector_add(), just before the
+ * connector .late_register() hooks. So we can't allocate the
+ * panel at connector init time, and can't allocate struct
+ * intel_panel with a drm_panel sub-struct. For now, use
+ * __devm_drm_panel_alloc() directly.
+ *
+ * The lookups also depend on drm_connector::fwnode being set in
+ * intel_acpi_assign_connector_fwnodes(). However, if that's
+ * missing, it will gracefully lead to -EPROBE_DEFER in
+ * drm_panel_add_follower().
+ */
+ base = __devm_drm_panel_alloc(dev, sizeof(*base), 0,
+ &dummy_panel_funcs,
+ connector->base.connector_type);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto err;
+ }
+
+ panel->base = base;
+
+ drm_panel_add(panel->base);
+
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Registered panel device '%s', has fwnode: %s\n",
+ connector->base.base.id, connector->base.name,
+ dev_name(dev), str_yes_no(dev_fwnode(dev)));
+
+ intel_panel_sync_state(connector);
+ }
+
+out:
+ return 0;
+
+err:
+ intel_backlight_device_unregister(connector);
+
+ return ret;
+}
+
+void intel_panel_unregister(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ if (panel->base)
+ drm_panel_remove(panel->base);
+
+ intel_backlight_device_unregister(connector);
+}
+
+/* Notify followers, if any, about power being up. */
+void intel_panel_prepare(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_panel_prepare(panel->base);
+}
+
+/* Notify followers, if any, about power going down. */
+void intel_panel_unprepare(const struct drm_connector_state *old_conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_panel_unprepare(panel->base);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index b60d12322e5d..56a6412cf0fb 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -23,6 +23,8 @@ void intel_panel_init_alloc(struct intel_connector *connector);
int intel_panel_init(struct intel_connector *connector,
const struct drm_edid *fixed_edid);
void intel_panel_fini(struct intel_connector *connector);
+int intel_panel_register(struct intel_connector *connector);
+void intel_panel_unregister(struct intel_connector *connector);
enum drm_connector_status
intel_panel_detect(struct drm_connector *connector, bool force);
bool intel_panel_use_ssc(struct intel_display *display);
@@ -51,4 +53,8 @@ void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector);
void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector,
struct intel_encoder *encoder);
+void intel_panel_prepare(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_panel_unprepare(const struct drm_connector_state *old_conn_state);
+
#endif /* __INTEL_PANEL_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index d1a382dfaa1d..93298820bee2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -250,11 +250,17 @@ void intel_gt_watchdog_work(struct work_struct *work)
llist_for_each_entry_safe(rq, rn, first, watchdog.link) {
if (!i915_request_completed(rq)) {
struct dma_fence *f = &rq->fence;
+ const char __rcu *timeline;
+ const char __rcu *driver;
+ rcu_read_lock();
+ driver = dma_fence_driver_name(f);
+ timeline = dma_fence_timeline_name(f);
pr_notice("Fence expiration time out i915-%s:%s:%llx!\n",
- f->ops->get_driver_name(f),
- f->ops->get_timeline_name(f),
+ rcu_dereference(driver),
+ rcu_dereference(timeline),
f->seqno);
+ rcu_read_unlock();
i915_request_cancel(rq, -EINTR);
}
i915_request_put(rq);
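The pattern above replaces direct fence->ops->get_driver_name()/get_timeline_name() dereferences with the RCU-protected accessors introduced by this series. A hedged, generic sketch of the same idiom:

static void example_log_fence(struct dma_fence *fence)
{
	const char __rcu *driver;
	const char __rcu *timeline;

	rcu_read_lock();
	driver = dma_fence_driver_name(fence);
	timeline = dma_fence_timeline_name(fence);
	pr_info("fence %s:%s:%llx\n",
		rcu_dereference(driver), rcu_dereference(timeline),
		fence->seqno);
	rcu_read_unlock();
}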
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 971fbff1211c..4a1675dea1c7 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1448,7 +1448,8 @@ static void intel_gt_reset_global(struct intel_gt *gt,
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
else
drm_dev_wedged_event(&gt->i915->drm,
- DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);
+ DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET,
+ NULL);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c3d27eadc0a7..b9a2b2194c8f 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -2184,7 +2184,7 @@ void i915_request_show(struct drm_printer *m,
const char *prefix,
int indent)
{
- const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
+ const char __rcu *timeline;
char buf[80] = "";
int x = 0;
@@ -2220,6 +2220,8 @@ void i915_request_show(struct drm_printer *m,
x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
+ rcu_read_lock();
+ timeline = dma_fence_timeline_name((struct dma_fence *)&rq->fence);
drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n",
prefix, indent, " ",
queue_status(rq),
@@ -2228,7 +2230,8 @@ void i915_request_show(struct drm_printer *m,
fence_status(rq),
buf,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
- name);
+ rcu_dereference(timeline));
+ rcu_read_unlock();
}
static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index a4902ee08b6e..73e89b168fc3 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -430,16 +430,22 @@ static void timer_i915_sw_fence_wake(struct timer_list *t)
struct i915_sw_dma_fence_cb_timer *cb = timer_container_of(cb, t,
timer);
struct i915_sw_fence *fence;
+ const char __rcu *timeline;
+ const char __rcu *driver;
fence = xchg(&cb->base.fence, NULL);
if (!fence)
return;
+ rcu_read_lock();
+ driver = dma_fence_driver_name(cb->dma);
+ timeline = dma_fence_timeline_name(cb->dma);
pr_notice("Asynchronous wait on fence %s:%s:%llx timed out (hint:%ps)\n",
- cb->dma->ops->get_driver_name(cb->dma),
- cb->dma->ops->get_timeline_name(cb->dma),
+ rcu_dereference(driver),
+ rcu_dereference(timeline),
cb->dma->seqno,
i915_sw_fence_debug_hint(fence));
+ rcu_read_unlock();
i915_sw_fence_set_error_once(fence, -ETIMEDOUT);
i915_sw_fence_complete(fence);
diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c
index 59b334d094fa..7564b0f21b42 100644
--- a/drivers/gpu/drm/imagination/pvr_job.c
+++ b/drivers/gpu/drm/imagination/pvr_job.c
@@ -446,7 +446,7 @@ create_job(struct pvr_device *pvr_dev,
if (err)
goto err_put_job;
- err = pvr_queue_job_init(job);
+ err = pvr_queue_job_init(job, pvr_file->file->client_id);
if (err)
goto err_put_job;
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index 5e9bc0992824..5a41ee79fed6 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -1073,6 +1073,7 @@ static int pvr_queue_cleanup_fw_context(struct pvr_queue *queue)
/**
* pvr_queue_job_init() - Initialize queue related fields in a pvr_job object.
* @job: The job to initialize.
+ * @drm_client_id: drm_file.client_id of the client submitting the job
*
* Bind the job to a queue and allocate memory to guarantee pvr_queue_job_arm()
* and pvr_queue_job_push() can't fail. We also make sure the context type is
@@ -1082,7 +1083,7 @@ static int pvr_queue_cleanup_fw_context(struct pvr_queue *queue)
* * 0 on success, or
* * An error code if something failed.
*/
-int pvr_queue_job_init(struct pvr_job *job)
+int pvr_queue_job_init(struct pvr_job *job, u64 drm_client_id)
{
/* Fragment jobs need at least one native fence wait on the geometry job fence. */
u32 min_native_dep_count = job->type == DRM_PVR_JOB_TYPE_FRAGMENT ? 1 : 0;
@@ -1099,7 +1100,7 @@ int pvr_queue_job_init(struct pvr_job *job)
if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count)))
return -E2BIG;
- err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE);
+ err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE, drm_client_id);
if (err)
return err;
diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h
index 93fe9ac9f58c..fc1986d73fc8 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.h
+++ b/drivers/gpu/drm/imagination/pvr_queue.h
@@ -143,7 +143,7 @@ struct pvr_queue {
bool pvr_queue_fence_is_ufo_backed(struct dma_fence *f);
-int pvr_queue_job_init(struct pvr_job *job);
+int pvr_queue_job_init(struct pvr_job *job, u64 drm_client_id);
void pvr_queue_job_cleanup(struct pvr_job *job);
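/*
 * The Imagination hunks above thread the submitting client's id from the
 * drm_file down to the scheduler: create_job() reads pvr_file->file->client_id,
 * passes it to pvr_queue_job_init(), which forwards it as the new last argument
 * of drm_sched_job_init(). A condensed sketch of that flow; the function name
 * and the explicit entity parameter are illustrative, not part of the patch:
 */
static int example_job_init(struct pvr_file *pvr_file, struct pvr_job *job,
			    struct drm_sched_entity *entity)
{
	/* drm_file.client_id identifies the submitting client for scheduler tracing */
	u64 drm_client_id = pvr_file->file->client_id;

	return drm_sched_job_init(&job->base, entity, 1, THIS_MODULE,
				  drm_client_id);
}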
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 03535a15dd8f..3e8c6edbc17c 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+source "drivers/gpu/drm/imx/dc/Kconfig"
source "drivers/gpu/drm/imx/dcss/Kconfig"
source "drivers/gpu/drm/imx/ipuv3/Kconfig"
source "drivers/gpu/drm/imx/lcdc/Kconfig"
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index 86f38e7c7422..c7b317640d71 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DRM_IMX8_DC) += dc/
obj-$(CONFIG_DRM_IMX_DCSS) += dcss/
obj-$(CONFIG_DRM_IMX) += ipuv3/
obj-$(CONFIG_DRM_IMX_LCDC) += lcdc/
diff --git a/drivers/gpu/drm/imx/dc/Kconfig b/drivers/gpu/drm/imx/dc/Kconfig
new file mode 100644
index 000000000000..415993207f2e
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/Kconfig
@@ -0,0 +1,13 @@
+config DRM_IMX8_DC
+ tristate "Freescale i.MX8 Display Controller Graphics"
+ depends on DRM && COMMON_CLK && OF && (ARCH_MXC || COMPILE_TEST)
+ select DRM_CLIENT_SELECTION
+ select DRM_GEM_DMA_HELPER
+ select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
+ select GENERIC_IRQ_CHIP
+ select REGMAP
+ select REGMAP_MMIO
+ help
+	  Enable Freescale i.MX8 Display Controller (DC) graphics support
diff --git a/drivers/gpu/drm/imx/dc/Makefile b/drivers/gpu/drm/imx/dc/Makefile
new file mode 100644
index 000000000000..b9d33c074984
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+
+imx8-dc-drm-objs := dc-cf.o dc-crtc.o dc-de.o dc-drv.o dc-ed.o dc-fg.o dc-fl.o \
+ dc-fu.o dc-fw.o dc-ic.o dc-kms.o dc-lb.o dc-pe.o \
+ dc-plane.o dc-tc.o
+
+obj-$(CONFIG_DRM_IMX8_DC) += imx8-dc-drm.o
diff --git a/drivers/gpu/drm/imx/dc/dc-cf.c b/drivers/gpu/drm/imx/dc/dc-cf.c
new file mode 100644
index 000000000000..2f077161e912
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-cf.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/component.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+#define STATICCONTROL 0x8
+
+#define FRAMEDIMENSIONS 0xc
+#define HEIGHT(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define WIDTH(x) FIELD_PREP(GENMASK(13, 0), ((x) - 1))
+
+#define CONSTANTCOLOR 0x10
+#define BLUE(x) FIELD_PREP(GENMASK(15, 8), (x))
+
+static const struct dc_subdev_info dc_cf_info[] = {
+ { .reg_start = 0x56180960, .id = 0, },
+ { .reg_start = 0x561809e0, .id = 1, },
+ { .reg_start = 0x561809a0, .id = 4, },
+ { .reg_start = 0x56180a20, .id = 5, },
+};
+
+static const struct regmap_range dc_cf_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, CONSTANTCOLOR),
+};
+
+static const struct regmap_access_table dc_cf_regmap_access_table = {
+ .yes_ranges = dc_cf_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_cf_regmap_ranges),
+};
+
+static const struct regmap_config dc_cf_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_cf_regmap_access_table,
+ .rd_table = &dc_cf_regmap_access_table,
+ .max_register = CONSTANTCOLOR,
+};
+
+static inline void dc_cf_enable_shden(struct dc_cf *cf)
+{
+ regmap_write(cf->reg_cfg, STATICCONTROL, SHDEN);
+}
+
+enum dc_link_id dc_cf_get_link_id(struct dc_cf *cf)
+{
+ return cf->link;
+}
+
+void dc_cf_framedimensions(struct dc_cf *cf, unsigned int w,
+ unsigned int h)
+{
+ regmap_write(cf->reg_cfg, FRAMEDIMENSIONS, WIDTH(w) | HEIGHT(h));
+}
+
+void dc_cf_constantcolor_black(struct dc_cf *cf)
+{
+ regmap_write(cf->reg_cfg, CONSTANTCOLOR, 0);
+}
+
+void dc_cf_constantcolor_blue(struct dc_cf *cf)
+{
+ regmap_write(cf->reg_cfg, CONSTANTCOLOR, BLUE(0xff));
+}
+
+void dc_cf_init(struct dc_cf *cf)
+{
+ dc_cf_enable_shden(cf);
+}
+
+static int dc_cf_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_cfg;
+ struct dc_cf *cf;
+ int id;
+
+ cf = devm_kzalloc(dev, sizeof(*cf), GFP_KERNEL);
+ if (!cf)
+ return -ENOMEM;
+
+ res_pec = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ cf->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_cf_cfg_regmap_config);
+ if (IS_ERR(cf->reg_cfg))
+ return PTR_ERR(cf->reg_cfg);
+
+ id = dc_subdev_get_id(dc_cf_info, ARRAY_SIZE(dc_cf_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ switch (id) {
+ case 0:
+ cf->link = LINK_ID_CONSTFRAME0;
+ dc_drm->cf_cont[0] = cf;
+ break;
+ case 1:
+ cf->link = LINK_ID_CONSTFRAME1;
+ dc_drm->cf_cont[1] = cf;
+ break;
+ case 4:
+ cf->link = LINK_ID_CONSTFRAME4;
+ dc_drm->cf_safe[0] = cf;
+ break;
+ case 5:
+ cf->link = LINK_ID_CONSTFRAME5;
+ dc_drm->cf_safe[1] = cf;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct component_ops dc_cf_ops = {
+ .bind = dc_cf_bind,
+};
+
+static int dc_cf_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_cf_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_cf_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_cf_ops);
+}
+
+static const struct of_device_id dc_cf_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-constframe" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_cf_dt_ids);
+
+struct platform_driver dc_cf_driver = {
+ .probe = dc_cf_probe,
+ .remove = dc_cf_remove,
+ .driver = {
+ .name = "imx8-dc-constframe",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_cf_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-crtc.c b/drivers/gpu/drm/imx/dc/dc-crtc.c
new file mode 100644
index 000000000000..31d3a982deaf
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-crtc.c
@@ -0,0 +1,555 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/completion.h>
+#include <linux/container_of.h>
+#include <linux/interrupt.h>
+#include <linux/irqreturn.h>
+#include <linux/pm_runtime.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_print.h>
+#include <drm/drm_vblank.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+#include "dc-kms.h"
+#include "dc-pe.h"
+
+#define dc_crtc_dbg(crtc, fmt, ...) \
+do { \
+ struct drm_crtc *_crtc = (crtc); \
+ drm_dbg_kms(_crtc->dev, "[CRTC:%d:%s] " fmt, \
+ _crtc->base.id, _crtc->name, ##__VA_ARGS__); \
+} while (0)
+
+#define dc_crtc_err(crtc, fmt, ...) \
+do { \
+ struct drm_crtc *_crtc = (crtc); \
+ drm_err(_crtc->dev, "[CRTC:%d:%s] " fmt, \
+ _crtc->base.id, _crtc->name, ##__VA_ARGS__); \
+} while (0)
+
+#define DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(c) \
+do { \
+ unsigned long ret; \
+ ret = wait_for_completion_timeout(&dc_crtc->c, HZ); \
+ if (ret == 0) \
+ dc_crtc_err(crtc, "%s: wait for " #c " timeout\n", \
+ __func__); \
+} while (0)
+
+#define DC_CRTC_CHECK_FRAMEGEN_FIFO(fg) \
+do { \
+ struct dc_fg *_fg = (fg); \
+ if (dc_fg_secondary_requests_to_read_empty_fifo(_fg)) { \
+ dc_fg_secondary_clear_channel_status(_fg); \
+ dc_crtc_err(crtc, "%s: FrameGen FIFO empty\n", \
+ __func__); \
+ } \
+} while (0)
+
+#define DC_CRTC_WAIT_FOR_FRAMEGEN_SECONDARY_SYNCUP(fg) \
+do { \
+ if (dc_fg_wait_for_secondary_syncup(fg)) \
+ dc_crtc_err(crtc, \
+			    "%s: FrameGen secondary channel didn't sync up\n",\
+ __func__); \
+} while (0)
+
+static inline struct dc_crtc *to_dc_crtc(struct drm_crtc *crtc)
+{
+ return container_of(crtc, struct dc_crtc, base);
+}
+
+static u32 dc_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ return dc_fg_get_frame_index(dc_crtc->fg);
+}
+
+static int dc_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ enable_irq(dc_crtc->irq_dec_framecomplete);
+
+ return 0;
+}
+
+static void dc_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ /* nosync due to atomic context */
+ disable_irq_nosync(dc_crtc->irq_dec_framecomplete);
+}
+
+static const struct drm_crtc_funcs dc_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .get_vblank_counter = dc_crtc_get_vblank_counter,
+ .enable_vblank = dc_crtc_enable_vblank,
+ .disable_vblank = dc_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+};
+
+static void dc_crtc_queue_state_event(struct drm_crtc_state *crtc_state)
+{
+ struct drm_crtc *crtc = crtc_state->crtc;
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc_state->event) {
+ WARN_ON(drm_crtc_vblank_get(crtc));
+ WARN_ON(dc_crtc->event);
+ dc_crtc->event = crtc_state->event;
+ crtc_state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static inline enum drm_mode_status
+dc_crtc_check_clock(struct dc_crtc *dc_crtc, int clk_khz)
+{
+ return dc_fg_check_clock(dc_crtc->fg, clk_khz);
+}
+
+static enum drm_mode_status
+dc_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ enum drm_mode_status status;
+
+ status = dc_crtc_check_clock(dc_crtc, mode->clock);
+ if (status != MODE_OK)
+ return status;
+
+ if (mode->crtc_clock > DC_FRAMEGEN_MAX_CLOCK_KHZ)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static int
+dc_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_display_mode *adj = &new_crtc_state->adjusted_mode;
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ enum drm_mode_status status;
+
+ status = dc_crtc_check_clock(dc_crtc, adj->clock);
+ if (status != MODE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void
+dc_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct dc_drm_device *dc_drm = to_dc_drm_device(crtc->dev);
+ int idx, ret;
+
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) ||
+ !new_crtc_state->active)
+ return;
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ return;
+
+ /* request pixel engine power-on when CRTC starts to be active */
+ ret = pm_runtime_resume_and_get(dc_drm->pe->dev);
+ if (ret)
+ dc_crtc_err(crtc, "failed to get DC pixel engine RPM: %d\n",
+ ret);
+
+ drm_dev_exit(idx);
+}
+
+static void
+dc_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *old_crtc_state =
+ drm_atomic_get_old_crtc_state(state, crtc);
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ int idx;
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state) ||
+ (!old_crtc_state->active && !new_crtc_state->active))
+ return;
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ goto out;
+
+ enable_irq(dc_crtc->irq_ed_cont_shdload);
+
+ /* flush plane update out to display */
+ dc_ed_pec_sync_trigger(dc_crtc->ed_cont);
+
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(ed_cont_shdload_done);
+
+ disable_irq(dc_crtc->irq_ed_cont_shdload);
+
+ DC_CRTC_CHECK_FRAMEGEN_FIFO(dc_crtc->fg);
+
+ drm_dev_exit(idx);
+
+out:
+ dc_crtc_queue_state_event(new_crtc_state);
+}
+
+static void
+dc_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct drm_display_mode *adj = &new_crtc_state->adjusted_mode;
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ enum dc_link_id cf_link;
+ int idx, ret;
+
+ dc_crtc_dbg(crtc, "mode " DRM_MODE_FMT "\n", DRM_MODE_ARG(adj));
+
+ drm_crtc_vblank_on(crtc);
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ goto out;
+
+ /* request display engine power-on when CRTC is enabled */
+ ret = pm_runtime_resume_and_get(dc_crtc->de->dev);
+ if (ret < 0)
+ dc_crtc_err(crtc, "failed to get DC display engine RPM: %d\n",
+ ret);
+
+ enable_irq(dc_crtc->irq_dec_shdload);
+ enable_irq(dc_crtc->irq_ed_cont_shdload);
+ enable_irq(dc_crtc->irq_ed_safe_shdload);
+
+ dc_fg_cfg_videomode(dc_crtc->fg, adj);
+
+ dc_cf_framedimensions(dc_crtc->cf_cont,
+ adj->crtc_hdisplay, adj->crtc_vdisplay);
+ dc_cf_framedimensions(dc_crtc->cf_safe,
+ adj->crtc_hdisplay, adj->crtc_vdisplay);
+
+ /* constframe in safety stream shows blue frame */
+ dc_cf_constantcolor_blue(dc_crtc->cf_safe);
+ cf_link = dc_cf_get_link_id(dc_crtc->cf_safe);
+ dc_ed_pec_src_sel(dc_crtc->ed_safe, cf_link);
+
+ /* show CRTC background if no plane is enabled */
+ if (new_crtc_state->plane_mask == 0) {
+ /* constframe in content stream shows black frame */
+ dc_cf_constantcolor_black(dc_crtc->cf_cont);
+
+ cf_link = dc_cf_get_link_id(dc_crtc->cf_cont);
+ dc_ed_pec_src_sel(dc_crtc->ed_cont, cf_link);
+ }
+
+ dc_fg_enable_clock(dc_crtc->fg);
+ dc_ed_pec_sync_trigger(dc_crtc->ed_cont);
+ dc_ed_pec_sync_trigger(dc_crtc->ed_safe);
+ dc_fg_shdtokgen(dc_crtc->fg);
+ dc_fg_enable(dc_crtc->fg);
+
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(ed_safe_shdload_done);
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(ed_cont_shdload_done);
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(dec_shdload_done);
+
+ disable_irq(dc_crtc->irq_ed_safe_shdload);
+ disable_irq(dc_crtc->irq_ed_cont_shdload);
+ disable_irq(dc_crtc->irq_dec_shdload);
+
+ DC_CRTC_WAIT_FOR_FRAMEGEN_SECONDARY_SYNCUP(dc_crtc->fg);
+
+ DC_CRTC_CHECK_FRAMEGEN_FIFO(dc_crtc->fg);
+
+ drm_dev_exit(idx);
+
+out:
+ dc_crtc_queue_state_event(new_crtc_state);
+}
+
+static void
+dc_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
+{
+ struct drm_crtc_state *new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, crtc);
+ struct dc_drm_device *dc_drm = to_dc_drm_device(crtc->dev);
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ int idx, ret;
+
+ if (!drm_dev_enter(crtc->dev, &idx))
+ goto out;
+
+ enable_irq(dc_crtc->irq_dec_seqcomplete);
+ dc_fg_disable(dc_crtc->fg);
+ DC_CRTC_WAIT_FOR_COMPLETION_TIMEOUT(dec_seqcomplete_done);
+ disable_irq(dc_crtc->irq_dec_seqcomplete);
+
+ dc_fg_disable_clock(dc_crtc->fg);
+
+ /* request pixel engine power-off as plane is off too */
+ ret = pm_runtime_put(dc_drm->pe->dev);
+ if (ret)
+ dc_crtc_err(crtc, "failed to put DC pixel engine RPM: %d\n",
+ ret);
+
+ /* request display engine power-off when CRTC is disabled */
+ ret = pm_runtime_put(dc_crtc->de->dev);
+ if (ret < 0)
+ dc_crtc_err(crtc, "failed to put DC display engine RPM: %d\n",
+ ret);
+
+ drm_dev_exit(idx);
+
+out:
+ drm_crtc_vblank_off(crtc);
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (new_crtc_state->event && !new_crtc_state->active) {
+ drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
+ new_crtc_state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+}
+
+static bool dc_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct dc_crtc *dc_crtc = to_dc_crtc(crtc);
+ int vdisplay = mode->crtc_vdisplay;
+ int vtotal = mode->crtc_vtotal;
+ bool reliable;
+ int line;
+ int idx;
+
+ if (stime)
+ *stime = ktime_get();
+
+ if (!drm_dev_enter(crtc->dev, &idx)) {
+ reliable = false;
+ *vpos = 0;
+ *hpos = 0;
+ goto out;
+ }
+
+ /* line index starts with 0 for the first active output line */
+ line = dc_fg_get_line_index(dc_crtc->fg);
+
+ if (line < vdisplay)
+ /* active scanout area - positive */
+ *vpos = line + 1;
+ else
+ /* inside vblank - negative */
+ *vpos = line - (vtotal - 1);
+
+ *hpos = 0;
+
+ reliable = true;
+
+ drm_dev_exit(idx);
+out:
+ if (etime)
+ *etime = ktime_get();
+
+ return reliable;
+}
+
+static const struct drm_crtc_helper_funcs dc_helper_funcs = {
+ .mode_valid = dc_crtc_mode_valid,
+ .atomic_check = dc_crtc_atomic_check,
+ .atomic_begin = dc_crtc_atomic_begin,
+ .atomic_flush = dc_crtc_atomic_flush,
+ .atomic_enable = dc_crtc_atomic_enable,
+ .atomic_disable = dc_crtc_atomic_disable,
+ .get_scanout_position = dc_crtc_get_scanout_position,
+};
+
+static irqreturn_t dc_crtc_irq_handler_dec_framecomplete(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+ struct drm_crtc *crtc = &dc_crtc->base;
+ unsigned long flags;
+
+ drm_crtc_handle_vblank(crtc);
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (dc_crtc->event) {
+ drm_crtc_send_vblank_event(crtc, dc_crtc->event);
+ dc_crtc->event = NULL;
+ drm_crtc_vblank_put(crtc);
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+dc_crtc_irq_handler_dec_seqcomplete_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->dec_seqcomplete_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dc_crtc_irq_handler_dec_shdload_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->dec_shdload_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+dc_crtc_irq_handler_ed_cont_shdload_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->ed_cont_shdload_done);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+dc_crtc_irq_handler_ed_safe_shdload_done(int irq, void *dev_id)
+{
+ struct dc_crtc *dc_crtc = dev_id;
+
+ complete(&dc_crtc->ed_safe_shdload_done);
+
+ return IRQ_HANDLED;
+}
+
+static int dc_crtc_request_irqs(struct drm_device *drm, struct dc_crtc *dc_crtc)
+{
+ struct {
+ struct device *dev;
+ unsigned int irq;
+ irqreturn_t (*irq_handler)(int irq, void *dev_id);
+ } irqs[DC_CRTC_IRQS] = {
+ {
+ dc_crtc->de->dev,
+ dc_crtc->irq_dec_framecomplete,
+ dc_crtc_irq_handler_dec_framecomplete,
+ }, {
+ dc_crtc->de->dev,
+ dc_crtc->irq_dec_seqcomplete,
+ dc_crtc_irq_handler_dec_seqcomplete_done,
+ }, {
+ dc_crtc->de->dev,
+ dc_crtc->irq_dec_shdload,
+ dc_crtc_irq_handler_dec_shdload_done,
+ }, {
+ dc_crtc->ed_cont->dev,
+ dc_crtc->irq_ed_cont_shdload,
+ dc_crtc_irq_handler_ed_cont_shdload_done,
+ }, {
+ dc_crtc->ed_safe->dev,
+ dc_crtc->irq_ed_safe_shdload,
+ dc_crtc_irq_handler_ed_safe_shdload_done,
+ },
+ };
+ int i, ret;
+
+ for (i = 0; i < DC_CRTC_IRQS; i++) {
+ struct dc_crtc_irq *irq = &dc_crtc->irqs[i];
+
+ ret = devm_request_irq(irqs[i].dev, irqs[i].irq,
+ irqs[i].irq_handler, IRQF_NO_AUTOEN,
+ dev_name(irqs[i].dev), dc_crtc);
+ if (ret) {
+ dev_err(irqs[i].dev, "failed to request irq(%u): %d\n",
+ irqs[i].irq, ret);
+ return ret;
+ }
+
+ irq->dc_crtc = dc_crtc;
+ irq->irq = irqs[i].irq;
+ }
+
+ return 0;
+}
+
+int dc_crtc_init(struct dc_drm_device *dc_drm, int crtc_index)
+{
+ struct dc_crtc *dc_crtc = &dc_drm->dc_crtc[crtc_index];
+ struct drm_device *drm = &dc_drm->base;
+ struct dc_de *de = dc_drm->de[crtc_index];
+ struct dc_pe *pe = dc_drm->pe;
+ struct dc_plane *dc_primary;
+ int ret;
+
+ dc_crtc->de = de;
+
+ init_completion(&dc_crtc->dec_seqcomplete_done);
+ init_completion(&dc_crtc->dec_shdload_done);
+ init_completion(&dc_crtc->ed_cont_shdload_done);
+ init_completion(&dc_crtc->ed_safe_shdload_done);
+
+ dc_crtc->cf_cont = pe->cf_cont[crtc_index];
+ dc_crtc->cf_safe = pe->cf_safe[crtc_index];
+ dc_crtc->ed_cont = pe->ed_cont[crtc_index];
+ dc_crtc->ed_safe = pe->ed_safe[crtc_index];
+ dc_crtc->fg = de->fg;
+
+ dc_crtc->irq_dec_framecomplete = de->irq_framecomplete;
+ dc_crtc->irq_dec_seqcomplete = de->irq_seqcomplete;
+ dc_crtc->irq_dec_shdload = de->irq_shdload;
+ dc_crtc->irq_ed_safe_shdload = dc_crtc->ed_safe->irq_shdload;
+ dc_crtc->irq_ed_cont_shdload = dc_crtc->ed_cont->irq_shdload;
+
+ dc_primary = &dc_drm->dc_primary[crtc_index];
+ ret = dc_plane_init(dc_drm, dc_primary);
+ if (ret) {
+ dev_err(de->dev, "failed to initialize primary plane: %d\n",
+ ret);
+ return ret;
+ }
+
+ drm_crtc_helper_add(&dc_crtc->base, &dc_helper_funcs);
+
+ ret = drm_crtc_init_with_planes(drm, &dc_crtc->base, &dc_primary->base,
+ NULL, &dc_crtc_funcs, NULL);
+ if (ret)
+ dev_err(de->dev, "failed to add CRTC: %d\n", ret);
+
+ return ret;
+}
+
+int dc_crtc_post_init(struct dc_drm_device *dc_drm, int crtc_index)
+{
+ struct dc_crtc *dc_crtc = &dc_drm->dc_crtc[crtc_index];
+ struct drm_device *drm = &dc_drm->base;
+
+ return dc_crtc_request_irqs(drm, dc_crtc);
+}
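/*
 * The CRTC code above drives each hardware hand-shake through a completion
 * signalled by a dedicated shadow-load interrupt: the IRQ is requested with
 * IRQF_NO_AUTOEN, enabled only around the operation expected to raise it,
 * bounded by wait_for_completion_timeout(), then disabled again. A reduced
 * sketch of that pattern with illustrative names; the reinit_completion()
 * call is an assumption for reuse, not something the code above does per
 * flush:
 */
static irqreturn_t example_shdload_irq(int irq, void *data)
{
	struct completion *done = data;

	complete(done);
	return IRQ_HANDLED;
}

static void example_flush(struct device *dev, int irq, struct completion *done)
{
	reinit_completion(done);
	enable_irq(irq);
	/* kick the hardware here, e.g. trigger a sync/shadow load */
	if (!wait_for_completion_timeout(done, HZ))
		dev_err(dev, "shadow load timed out\n");
	disable_irq(irq);
}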
diff --git a/drivers/gpu/drm/imx/dc/dc-de.c b/drivers/gpu/drm/imx/dc/dc-de.c
new file mode 100644
index 000000000000..5a3125596fdf
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-de.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+
+#define POLARITYCTRL 0xc
+#define POLEN_HIGH BIT(2)
+
+static const struct dc_subdev_info dc_de_info[] = {
+ { .reg_start = 0x5618b400, .id = 0, },
+ { .reg_start = 0x5618b420, .id = 1, },
+};
+
+static const struct regmap_range dc_de_regmap_ranges[] = {
+ regmap_reg_range(POLARITYCTRL, POLARITYCTRL),
+};
+
+static const struct regmap_access_table dc_de_regmap_access_table = {
+ .yes_ranges = dc_de_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_de_regmap_ranges),
+};
+
+static const struct regmap_config dc_de_top_regmap_config = {
+ .name = "top",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_de_regmap_access_table,
+ .rd_table = &dc_de_regmap_access_table,
+ .max_register = POLARITYCTRL,
+};
+
+static inline void dc_dec_init(struct dc_de *de)
+{
+ regmap_write_bits(de->reg_top, POLARITYCTRL, POLARITYCTRL, POLEN_HIGH);
+}
+
+static int dc_de_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_top;
+ void __iomem *base_top;
+ struct dc_de *de;
+ int ret, id;
+
+ de = devm_kzalloc(dev, sizeof(*de), GFP_KERNEL);
+ if (!de)
+ return -ENOMEM;
+
+ base_top = devm_platform_get_and_ioremap_resource(pdev, 0, &res_top);
+ if (IS_ERR(base_top))
+ return PTR_ERR(base_top);
+
+ de->reg_top = devm_regmap_init_mmio(dev, base_top,
+ &dc_de_top_regmap_config);
+ if (IS_ERR(de->reg_top))
+ return PTR_ERR(de->reg_top);
+
+ de->irq_shdload = platform_get_irq_byname(pdev, "shdload");
+ if (de->irq_shdload < 0)
+ return de->irq_shdload;
+
+ de->irq_framecomplete = platform_get_irq_byname(pdev, "framecomplete");
+ if (de->irq_framecomplete < 0)
+ return de->irq_framecomplete;
+
+ de->irq_seqcomplete = platform_get_irq_byname(pdev, "seqcomplete");
+ if (de->irq_seqcomplete < 0)
+ return de->irq_seqcomplete;
+
+ de->dev = dev;
+
+ dev_set_drvdata(dev, de);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ id = dc_subdev_get_id(dc_de_info, ARRAY_SIZE(dc_de_info), res_top);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ dc_drm->de[id] = de;
+
+ return 0;
+}
+
+/*
+ * The child device pointers could be collected in the child components'
+ * bind callbacks, but that would rely on the component helper binding the
+ * display engine component first. To avoid that ordering dependency, fetch
+ * the pointers from dc_drm after all components have bound.
+ */
+void dc_de_post_bind(struct dc_drm_device *dc_drm)
+{
+ struct dc_de *de;
+ int i;
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ de = dc_drm->de[i];
+ de->fg = dc_drm->fg[i];
+ de->tc = dc_drm->tc[i];
+ }
+}
+
+static const struct component_ops dc_de_ops = {
+ .bind = dc_de_bind,
+};
+
+static int dc_de_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = component_add(&pdev->dev, &dc_de_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_de_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_de_ops);
+}
+
+static int dc_de_runtime_resume(struct device *dev)
+{
+ struct dc_de *de = dev_get_drvdata(dev);
+
+ dc_dec_init(de);
+ dc_fg_init(de->fg);
+ dc_tc_init(de->tc);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dc_de_pm_ops = {
+ RUNTIME_PM_OPS(NULL, dc_de_runtime_resume, NULL)
+};
+
+static const struct of_device_id dc_de_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-display-engine" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_de_dt_ids);
+
+struct platform_driver dc_de_driver = {
+ .probe = dc_de_probe,
+ .remove = dc_de_remove,
+ .driver = {
+ .name = "imx8-dc-display-engine",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_de_dt_ids,
+ .pm = pm_sleep_ptr(&dc_de_pm_ops),
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-de.h b/drivers/gpu/drm/imx/dc/dc-de.h
new file mode 100644
index 000000000000..211f3fcc1a9a
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-de.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_DISPLAY_ENGINE_H__
+#define __DC_DISPLAY_ENGINE_H__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+#include <drm/drm_modes.h>
+
+#define DC_DISPLAYS 2
+
+#define DC_FRAMEGEN_MAX_FRAME_INDEX 0x3ffff
+#define DC_FRAMEGEN_MAX_CLOCK_KHZ 300000
+
+struct dc_fg {
+ struct device *dev;
+ struct regmap *reg;
+ struct clk *clk_disp;
+};
+
+struct dc_tc {
+ struct device *dev;
+ struct regmap *reg;
+};
+
+struct dc_de {
+ struct device *dev;
+ struct regmap *reg_top;
+ struct dc_fg *fg;
+ struct dc_tc *tc;
+ int irq_shdload;
+ int irq_framecomplete;
+ int irq_seqcomplete;
+};
+
+/* Frame Generator Unit */
+void dc_fg_cfg_videomode(struct dc_fg *fg, struct drm_display_mode *m);
+void dc_fg_enable(struct dc_fg *fg);
+void dc_fg_disable(struct dc_fg *fg);
+void dc_fg_shdtokgen(struct dc_fg *fg);
+u32 dc_fg_get_frame_index(struct dc_fg *fg);
+u32 dc_fg_get_line_index(struct dc_fg *fg);
+bool dc_fg_wait_for_frame_index_moving(struct dc_fg *fg);
+bool dc_fg_secondary_requests_to_read_empty_fifo(struct dc_fg *fg);
+void dc_fg_secondary_clear_channel_status(struct dc_fg *fg);
+int dc_fg_wait_for_secondary_syncup(struct dc_fg *fg);
+void dc_fg_enable_clock(struct dc_fg *fg);
+void dc_fg_disable_clock(struct dc_fg *fg);
+enum drm_mode_status dc_fg_check_clock(struct dc_fg *fg, int clk_khz);
+void dc_fg_init(struct dc_fg *fg);
+
+/* Timing Controller Unit */
+void dc_tc_init(struct dc_tc *tc);
+
+#endif /* __DC_DISPLAY_ENGINE_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-drv.c b/drivers/gpu/drm/imx/dc/dc-drv.c
new file mode 100644
index 000000000000..04f021d2d6cf
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-drv.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/clients/drm_client_setup.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fbdev_dma.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_dma_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper.h>
+#include <drm/drm_of.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+struct dc_priv {
+ struct drm_device *drm;
+ struct clk *clk_cfg;
+};
+
+DEFINE_DRM_GEM_DMA_FOPS(dc_drm_driver_fops);
+
+static struct drm_driver dc_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
+ DRM_GEM_DMA_DRIVER_OPS,
+ DRM_FBDEV_DMA_DRIVER_OPS,
+ .fops = &dc_drm_driver_fops,
+ .name = "imx8-dc",
+ .desc = "i.MX8 DC DRM graphics",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+};
+
+static void
+dc_add_components(struct device *dev, struct component_match **matchptr)
+{
+ struct device_node *child, *grandchild;
+
+ for_each_available_child_of_node(dev->of_node, child) {
+ /* The interrupt controller is not a component. */
+ if (of_device_is_compatible(child, "fsl,imx8qxp-dc-intc"))
+ continue;
+
+ drm_of_component_match_add(dev, matchptr, component_compare_of,
+ child);
+
+ for_each_available_child_of_node(child, grandchild)
+ drm_of_component_match_add(dev, matchptr,
+ component_compare_of,
+ grandchild);
+ }
+}
+
+static int dc_drm_component_bind_all(struct dc_drm_device *dc_drm)
+{
+ struct drm_device *drm = &dc_drm->base;
+ int ret;
+
+ ret = component_bind_all(drm->dev, dc_drm);
+ if (ret)
+ return ret;
+
+ dc_de_post_bind(dc_drm);
+ dc_pe_post_bind(dc_drm);
+
+ return 0;
+}
+
+static void dc_drm_component_unbind_all(void *ptr)
+{
+ struct dc_drm_device *dc_drm = ptr;
+ struct drm_device *drm = &dc_drm->base;
+
+ component_unbind_all(drm->dev, dc_drm);
+}
+
+static int dc_drm_bind(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+ struct dc_drm_device *dc_drm;
+ struct drm_device *drm;
+ int ret;
+
+ dc_drm = devm_drm_dev_alloc(dev, &dc_drm_driver, struct dc_drm_device,
+ base);
+ if (IS_ERR(dc_drm))
+ return PTR_ERR(dc_drm);
+
+ drm = &dc_drm->base;
+
+ ret = dc_drm_component_bind_all(dc_drm);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, dc_drm_component_unbind_all,
+ dc_drm);
+ if (ret)
+ return ret;
+
+ ret = dc_kms_init(dc_drm);
+ if (ret)
+ return ret;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret) {
+ dev_err(dev, "failed to register drm device: %d\n", ret);
+ goto err;
+ }
+
+ drm_client_setup_with_fourcc(drm, DRM_FORMAT_XRGB8888);
+
+ priv->drm = drm;
+
+ return 0;
+
+err:
+ dc_kms_uninit(dc_drm);
+
+ return ret;
+}
+
+static void dc_drm_unbind(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+ struct dc_drm_device *dc_drm = to_dc_drm_device(priv->drm);
+ struct drm_device *drm = &dc_drm->base;
+
+ priv->drm = NULL;
+ drm_dev_unplug(drm);
+ dc_kms_uninit(dc_drm);
+ drm_atomic_helper_shutdown(drm);
+}
+
+static const struct component_master_ops dc_drm_ops = {
+ .bind = dc_drm_bind,
+ .unbind = dc_drm_unbind,
+};
+
+static int dc_probe(struct platform_device *pdev)
+{
+ struct component_match *match = NULL;
+ struct dc_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk_cfg = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk_cfg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk_cfg),
+ "failed to get cfg clock\n");
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret)
+ return ret;
+
+ dc_add_components(&pdev->dev, &match);
+
+ ret = component_master_add_with_match(&pdev->dev, &dc_drm_ops, match);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component master\n");
+
+ return 0;
+}
+
+static void dc_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &dc_drm_ops);
+}
+
+static int dc_runtime_suspend(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(priv->clk_cfg);
+
+ return 0;
+}
+
+static int dc_runtime_resume(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk_cfg);
+ if (ret)
+ dev_err(dev, "failed to enable cfg clock: %d\n", ret);
+
+ return ret;
+}
+
+static int dc_suspend(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_suspend(priv->drm);
+}
+
+static int dc_resume(struct device *dev)
+{
+ struct dc_priv *priv = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_resume(priv->drm);
+}
+
+static void dc_shutdown(struct platform_device *pdev)
+{
+ struct dc_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ drm_atomic_helper_shutdown(priv->drm);
+}
+
+static const struct dev_pm_ops dc_pm_ops = {
+ RUNTIME_PM_OPS(dc_runtime_suspend, dc_runtime_resume, NULL)
+ SYSTEM_SLEEP_PM_OPS(dc_suspend, dc_resume)
+};
+
+static const struct of_device_id dc_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_dt_ids);
+
+static struct platform_driver dc_driver = {
+ .probe = dc_probe,
+ .remove = dc_remove,
+ .shutdown = dc_shutdown,
+ .driver = {
+ .name = "imx8-dc",
+ .of_match_table = dc_dt_ids,
+ .pm = pm_sleep_ptr(&dc_pm_ops),
+ },
+};
+
+static struct platform_driver * const dc_drivers[] = {
+ &dc_cf_driver,
+ &dc_de_driver,
+ &dc_ed_driver,
+ &dc_fg_driver,
+ &dc_fl_driver,
+ &dc_fw_driver,
+ &dc_ic_driver,
+ &dc_lb_driver,
+ &dc_pe_driver,
+ &dc_tc_driver,
+ &dc_driver,
+};
+
+static int __init dc_drm_init(void)
+{
+ return platform_register_drivers(dc_drivers, ARRAY_SIZE(dc_drivers));
+}
+
+static void __exit dc_drm_exit(void)
+{
+ platform_unregister_drivers(dc_drivers, ARRAY_SIZE(dc_drivers));
+}
+
+module_init(dc_drm_init);
+module_exit(dc_drm_exit);
+
+MODULE_DESCRIPTION("i.MX8 Display Controller DRM Driver");
+MODULE_AUTHOR("Liu Ying <victor.liu@nxp.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/imx/dc/dc-drv.h b/drivers/gpu/drm/imx/dc/dc-drv.h
new file mode 100644
index 000000000000..eb61b8c76269
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-drv.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_DRV_H__
+#define __DC_DRV_H__
+
+#include <linux/container_of.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+
+#include "dc-de.h"
+#include "dc-kms.h"
+#include "dc-pe.h"
+
+/**
+ * struct dc_drm_device - DC specific drm_device
+ */
+struct dc_drm_device {
+ /** @base: base drm_device structure */
+ struct drm_device base;
+ /** @dc_crtc: DC specific CRTC list */
+ struct dc_crtc dc_crtc[DC_DISPLAYS];
+ /** @dc_primary: DC specific primary plane list */
+ struct dc_plane dc_primary[DC_DISPLAYS];
+ /** @encoder: encoder list */
+ struct drm_encoder encoder[DC_DISPLAYS];
+	/** @cf_safe: constframe list (safety stream) */
+	struct dc_cf *cf_safe[DC_DISPLAYS];
+	/** @cf_cont: constframe list (content stream) */
+	struct dc_cf *cf_cont[DC_DISPLAYS];
+	/** @de: display engine list */
+	struct dc_de *de[DC_DISPLAYS];
+	/** @ed_safe: extdst list (safety stream) */
+	struct dc_ed *ed_safe[DC_DISPLAYS];
+	/** @ed_cont: extdst list (content stream) */
+	struct dc_ed *ed_cont[DC_DISPLAYS];
+	/** @fg: framegen list */
+	struct dc_fg *fg[DC_DISPLAYS];
+	/** @fu_disp: fetchunit list (used by display engine) */
+	struct dc_fu *fu_disp[DC_DISP_FU_CNT];
+ /** @lb: layerblend list */
+ struct dc_lb *lb[DC_LB_CNT];
+ /** @pe: pixel engine */
+ struct dc_pe *pe;
+ /** @tc: tcon list */
+ struct dc_tc *tc[DC_DISPLAYS];
+};
+
+struct dc_subdev_info {
+ resource_size_t reg_start;
+ int id;
+};
+
+static inline struct dc_drm_device *to_dc_drm_device(struct drm_device *drm)
+{
+ return container_of(drm, struct dc_drm_device, base);
+}
+
+int dc_crtc_init(struct dc_drm_device *dc_drm, int crtc_index);
+int dc_crtc_post_init(struct dc_drm_device *dc_drm, int crtc_index);
+
+int dc_kms_init(struct dc_drm_device *dc_drm);
+void dc_kms_uninit(struct dc_drm_device *dc_drm);
+
+int dc_plane_init(struct dc_drm_device *dc_drm, struct dc_plane *dc_plane);
+
+extern struct platform_driver dc_cf_driver;
+extern struct platform_driver dc_de_driver;
+extern struct platform_driver dc_ed_driver;
+extern struct platform_driver dc_fg_driver;
+extern struct platform_driver dc_fl_driver;
+extern struct platform_driver dc_fw_driver;
+extern struct platform_driver dc_ic_driver;
+extern struct platform_driver dc_lb_driver;
+extern struct platform_driver dc_pe_driver;
+extern struct platform_driver dc_tc_driver;
+
+static inline int dc_subdev_get_id(const struct dc_subdev_info *info,
+ int info_cnt, struct resource *res)
+{
+ int i;
+
+ if (!res)
+ return -EINVAL;
+
+ for (i = 0; i < info_cnt; i++)
+ if (info[i].reg_start == res->start)
+ return info[i].id;
+
+ return -EINVAL;
+}
+
+void dc_de_post_bind(struct dc_drm_device *dc_drm);
+void dc_pe_post_bind(struct dc_drm_device *dc_drm);
+
+#endif /* __DC_DRV_H__ */
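/*
 * dc_subdev_get_id() above maps a sub-block's MMIO base address back to a
 * fixed instance number, since several identical sub-blocks share one
 * compatible string. A minimal usage sketch; the two-entry table mirrors the
 * first two constframe entries from dc-cf.c earlier in this series, and the
 * function name is illustrative:
 */
static int example_get_constframe_id(struct platform_device *pdev)
{
	static const struct dc_subdev_info info[] = {
		{ .reg_start = 0x56180960, .id = 0, },
		{ .reg_start = 0x561809e0, .id = 1, },
	};
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* returns -EINVAL if the resource is missing or the base is unknown */
	return dc_subdev_get_id(info, ARRAY_SIZE(info), res);
}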
diff --git a/drivers/gpu/drm/imx/dc/dc-ed.c b/drivers/gpu/drm/imx/dc/dc-ed.c
new file mode 100644
index 000000000000..86ecc22d0a55
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-ed.c
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+#define PIXENGCFG_STATIC 0x8
+#define POWERDOWN BIT(4)
+#define SYNC_MODE BIT(8)
+#define SINGLE 0
+#define DIV_MASK GENMASK(23, 16)
+#define DIV(x) FIELD_PREP(DIV_MASK, (x))
+#define DIV_RESET 0x80
+
+#define PIXENGCFG_DYNAMIC 0xc
+
+#define PIXENGCFG_TRIGGER 0x14
+#define SYNC_TRIGGER BIT(0)
+
+#define STATICCONTROL 0x8
+#define KICK_MODE BIT(8)
+#define EXTERNAL BIT(8)
+#define PERFCOUNTMODE BIT(12)
+
+#define CONTROL 0xc
+#define GAMMAAPPLYENABLE BIT(0)
+
+static const struct dc_subdev_info dc_ed_info[] = {
+ { .reg_start = 0x56180980, .id = 0, },
+ { .reg_start = 0x56180a00, .id = 1, },
+ { .reg_start = 0x561809c0, .id = 4, },
+ { .reg_start = 0x56180a40, .id = 5, },
+};
+
+static const struct regmap_range dc_ed_pec_regmap_write_ranges[] = {
+ regmap_reg_range(PIXENGCFG_STATIC, PIXENGCFG_STATIC),
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+ regmap_reg_range(PIXENGCFG_TRIGGER, PIXENGCFG_TRIGGER),
+};
+
+static const struct regmap_access_table dc_ed_pec_regmap_write_table = {
+ .yes_ranges = dc_ed_pec_regmap_write_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_pec_regmap_write_ranges),
+};
+
+static const struct regmap_range dc_ed_pec_regmap_read_ranges[] = {
+ regmap_reg_range(PIXENGCFG_STATIC, PIXENGCFG_STATIC),
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+};
+
+static const struct regmap_access_table dc_ed_pec_regmap_read_table = {
+ .yes_ranges = dc_ed_pec_regmap_read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_pec_regmap_read_ranges),
+};
+
+static const struct regmap_range dc_ed_pec_regmap_volatile_ranges[] = {
+ regmap_reg_range(PIXENGCFG_TRIGGER, PIXENGCFG_TRIGGER),
+};
+
+static const struct regmap_access_table dc_ed_pec_regmap_volatile_table = {
+ .yes_ranges = dc_ed_pec_regmap_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_pec_regmap_volatile_ranges),
+};
+
+static const struct regmap_config dc_ed_pec_regmap_config = {
+ .name = "pec",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_ed_pec_regmap_write_table,
+ .rd_table = &dc_ed_pec_regmap_read_table,
+ .volatile_table = &dc_ed_pec_regmap_volatile_table,
+ .max_register = PIXENGCFG_TRIGGER,
+};
+
+static const struct regmap_range dc_ed_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, STATICCONTROL),
+ regmap_reg_range(CONTROL, CONTROL),
+};
+
+static const struct regmap_access_table dc_ed_regmap_access_table = {
+ .yes_ranges = dc_ed_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ed_regmap_ranges),
+};
+
+static const struct regmap_config dc_ed_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_ed_regmap_access_table,
+ .rd_table = &dc_ed_regmap_access_table,
+ .max_register = CONTROL,
+};
+
+static const enum dc_link_id src_sels[] = {
+ LINK_ID_NONE,
+ LINK_ID_CONSTFRAME0,
+ LINK_ID_CONSTFRAME1,
+ LINK_ID_CONSTFRAME4,
+ LINK_ID_CONSTFRAME5,
+ LINK_ID_LAYERBLEND3,
+ LINK_ID_LAYERBLEND2,
+ LINK_ID_LAYERBLEND1,
+ LINK_ID_LAYERBLEND0,
+};
+
+static inline void dc_ed_pec_enable_shden(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, SHDEN, SHDEN);
+}
+
+static inline void dc_ed_pec_poweron(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, POWERDOWN, 0);
+}
+
+static inline void dc_ed_pec_sync_mode_single(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, SYNC_MODE, SINGLE);
+}
+
+static inline void dc_ed_pec_div_reset(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_pec, PIXENGCFG_STATIC, DIV_MASK,
+ DIV(DIV_RESET));
+}
+
+void dc_ed_pec_src_sel(struct dc_ed *ed, enum dc_link_id src)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(src_sels); i++) {
+ if (src_sels[i] == src) {
+ regmap_write(ed->reg_pec, PIXENGCFG_DYNAMIC, src);
+ return;
+ }
+ }
+}
+
+void dc_ed_pec_sync_trigger(struct dc_ed *ed)
+{
+ regmap_write(ed->reg_pec, PIXENGCFG_TRIGGER, SYNC_TRIGGER);
+}
+
+static inline void dc_ed_enable_shden(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, STATICCONTROL, SHDEN, SHDEN);
+}
+
+static inline void dc_ed_kick_mode_external(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, STATICCONTROL, KICK_MODE, EXTERNAL);
+}
+
+static inline void dc_ed_disable_perfcountmode(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, STATICCONTROL, PERFCOUNTMODE, 0);
+}
+
+static inline void dc_ed_disable_gamma_apply(struct dc_ed *ed)
+{
+ regmap_write_bits(ed->reg_cfg, CONTROL, GAMMAAPPLYENABLE, 0);
+}
+
+void dc_ed_init(struct dc_ed *ed)
+{
+ dc_ed_pec_src_sel(ed, LINK_ID_NONE);
+ dc_ed_pec_enable_shden(ed);
+ dc_ed_pec_poweron(ed);
+ dc_ed_pec_sync_mode_single(ed);
+ dc_ed_pec_div_reset(ed);
+ dc_ed_enable_shden(ed);
+ dc_ed_disable_perfcountmode(ed);
+ dc_ed_kick_mode_external(ed);
+ dc_ed_disable_gamma_apply(ed);
+}
+
+static int dc_ed_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_pec;
+ void __iomem *base_cfg;
+ struct dc_ed *ed;
+ int id;
+
+ ed = devm_kzalloc(dev, sizeof(*ed), GFP_KERNEL);
+ if (!ed)
+ return -ENOMEM;
+
+ base_pec = devm_platform_get_and_ioremap_resource(pdev, 0, &res_pec);
+ if (IS_ERR(base_pec))
+ return PTR_ERR(base_pec);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ ed->reg_pec = devm_regmap_init_mmio(dev, base_pec,
+ &dc_ed_pec_regmap_config);
+ if (IS_ERR(ed->reg_pec))
+ return PTR_ERR(ed->reg_pec);
+
+ ed->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_ed_cfg_regmap_config);
+ if (IS_ERR(ed->reg_cfg))
+ return PTR_ERR(ed->reg_cfg);
+
+ ed->irq_shdload = platform_get_irq_byname(pdev, "shdload");
+ if (ed->irq_shdload < 0)
+ return ed->irq_shdload;
+
+ ed->dev = dev;
+
+ id = dc_subdev_get_id(dc_ed_info, ARRAY_SIZE(dc_ed_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ switch (id) {
+ case 0:
+ dc_drm->ed_cont[0] = ed;
+ break;
+ case 1:
+ dc_drm->ed_cont[1] = ed;
+ break;
+ case 4:
+ dc_drm->ed_safe[0] = ed;
+ break;
+ case 5:
+ dc_drm->ed_safe[1] = ed;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct component_ops dc_ed_ops = {
+ .bind = dc_ed_bind,
+};
+
+static int dc_ed_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_ed_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_ed_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_ed_ops);
+}
+
+static const struct of_device_id dc_ed_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-extdst" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_ed_dt_ids);
+
+struct platform_driver dc_ed_driver = {
+ .probe = dc_ed_probe,
+ .remove = dc_ed_remove,
+ .driver = {
+ .name = "imx8-dc-extdst",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_ed_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-fg.c b/drivers/gpu/drm/imx/dc/dc-fg.c
new file mode 100644
index 000000000000..7f6c1852bf72
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fg.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/units.h>
+
+#include <drm/drm_modes.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+
+#define FGSTCTRL 0x8
+#define FGSYNCMODE_MASK GENMASK(2, 1)
+#define FGSYNCMODE(x) FIELD_PREP(FGSYNCMODE_MASK, (x))
+#define SHDEN BIT(0)
+
+#define HTCFG1 0xc
+#define HTOTAL(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define HACT(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+#define HTCFG2 0x10
+#define HSEN BIT(31)
+#define HSBP(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define HSYNC(x) FIELD_PREP(GENMASK(13, 0), ((x) - 1))
+
+#define VTCFG1 0x14
+#define VTOTAL(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define VACT(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+#define VTCFG2 0x18
+#define VSEN BIT(31)
+#define VSBP(x) FIELD_PREP(GENMASK(29, 16), ((x) - 1))
+#define VSYNC(x) FIELD_PREP(GENMASK(13, 0), ((x) - 1))
+
+#define PKICKCONFIG 0x2c
+#define SKICKCONFIG 0x30
+#define EN BIT(31)
+#define ROW(x) FIELD_PREP(GENMASK(29, 16), (x))
+#define COL(x) FIELD_PREP(GENMASK(13, 0), (x))
+
+#define PACFG 0x54
+#define SACFG 0x58
+#define STARTY(x) FIELD_PREP(GENMASK(29, 16), ((x) + 1))
+#define STARTX(x) FIELD_PREP(GENMASK(13, 0), ((x) + 1))
+
+#define FGINCTRL 0x5c
+#define FGINCTRLPANIC 0x60
+#define FGDM_MASK GENMASK(2, 0)
+#define ENPRIMALPHA BIT(3)
+#define ENSECALPHA BIT(4)
+
+#define FGCCR 0x64
+#define CCGREEN(x) FIELD_PREP(GENMASK(19, 10), (x))
+
+#define FGENABLE 0x68
+#define FGEN BIT(0)
+
+#define FGSLR 0x6c
+#define SHDTOKGEN BIT(0)
+
+#define FGTIMESTAMP 0x74
+#define FRAMEINDEX(x) FIELD_GET(GENMASK(31, 14), (x))
+#define LINEINDEX(x) FIELD_GET(GENMASK(13, 0), (x))
+
+#define FGCHSTAT 0x78
+#define SECSYNCSTAT BIT(24)
+#define SFIFOEMPTY BIT(16)
+
+#define FGCHSTATCLR 0x7c
+#define CLRSECSTAT BIT(16)
+
+enum dc_fg_syncmode {
+ FG_SYNCMODE_OFF, /* No side-by-side synchronization. */
+};
+
+enum dc_fg_dm {
+ FG_DM_CONSTCOL = 0x1, /* Constant Color Background is shown. */
+ FG_DM_SEC_ON_TOP = 0x5, /* Both inputs overlaid with secondary on top. */
+};
+
+static const struct dc_subdev_info dc_fg_info[] = {
+ { .reg_start = 0x5618b800, .id = 0, },
+ { .reg_start = 0x5618d400, .id = 1, },
+};
+
+static const struct regmap_range dc_fg_regmap_write_ranges[] = {
+ regmap_reg_range(FGSTCTRL, VTCFG2),
+ regmap_reg_range(PKICKCONFIG, SKICKCONFIG),
+ regmap_reg_range(PACFG, FGSLR),
+ regmap_reg_range(FGCHSTATCLR, FGCHSTATCLR),
+};
+
+static const struct regmap_range dc_fg_regmap_read_ranges[] = {
+ regmap_reg_range(FGSTCTRL, VTCFG2),
+ regmap_reg_range(PKICKCONFIG, SKICKCONFIG),
+ regmap_reg_range(PACFG, FGENABLE),
+ regmap_reg_range(FGTIMESTAMP, FGCHSTAT),
+};
+
+static const struct regmap_access_table dc_fg_regmap_write_table = {
+ .yes_ranges = dc_fg_regmap_write_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fg_regmap_write_ranges),
+};
+
+static const struct regmap_access_table dc_fg_regmap_read_table = {
+ .yes_ranges = dc_fg_regmap_read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fg_regmap_read_ranges),
+};
+
+static const struct regmap_config dc_fg_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fg_regmap_write_table,
+ .rd_table = &dc_fg_regmap_read_table,
+ .max_register = FGCHSTATCLR,
+};
+
+static inline void dc_fg_enable_shden(struct dc_fg *fg)
+{
+ regmap_write_bits(fg->reg, FGSTCTRL, SHDEN, SHDEN);
+}
+
+static inline void dc_fg_syncmode(struct dc_fg *fg, enum dc_fg_syncmode mode)
+{
+ regmap_write_bits(fg->reg, FGSTCTRL, FGSYNCMODE_MASK, FGSYNCMODE(mode));
+}
+
+void dc_fg_cfg_videomode(struct dc_fg *fg, struct drm_display_mode *m)
+{
+ u32 hact, htotal, hsync, hsbp;
+ u32 vact, vtotal, vsync, vsbp;
+ u32 kick_row, kick_col;
+ int ret;
+
+ hact = m->crtc_hdisplay;
+ htotal = m->crtc_htotal;
+ hsync = m->crtc_hsync_end - m->crtc_hsync_start;
+ hsbp = m->crtc_htotal - m->crtc_hsync_start;
+
+ vact = m->crtc_vdisplay;
+ vtotal = m->crtc_vtotal;
+ vsync = m->crtc_vsync_end - m->crtc_vsync_start;
+ vsbp = m->crtc_vtotal - m->crtc_vsync_start;
+
+ /* video mode */
+ regmap_write(fg->reg, HTCFG1, HACT(hact) | HTOTAL(htotal));
+ regmap_write(fg->reg, HTCFG2, HSYNC(hsync) | HSBP(hsbp) | HSEN);
+ regmap_write(fg->reg, VTCFG1, VACT(vact) | VTOTAL(vtotal));
+ regmap_write(fg->reg, VTCFG2, VSYNC(vsync) | VSBP(vsbp) | VSEN);
+
+ kick_col = hact + 1;
+ kick_row = vact;
+
+ /* pkickconfig */
+ regmap_write(fg->reg, PKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN);
+
+	/* skickconfig */
+ regmap_write(fg->reg, SKICKCONFIG, COL(kick_col) | ROW(kick_row) | EN);
+
+ /* primary and secondary area position configuration */
+ regmap_write(fg->reg, PACFG, STARTX(0) | STARTY(0));
+ regmap_write(fg->reg, SACFG, STARTX(0) | STARTY(0));
+
+ /* alpha */
+ regmap_write_bits(fg->reg, FGINCTRL, ENPRIMALPHA | ENSECALPHA, 0);
+ regmap_write_bits(fg->reg, FGINCTRLPANIC, ENPRIMALPHA | ENSECALPHA, 0);
+
+	/* constant color is green (used in panic mode) */
+ regmap_write(fg->reg, FGCCR, CCGREEN(0x3ff));
+
+ ret = clk_set_rate(fg->clk_disp, m->clock * HZ_PER_KHZ);
+ if (ret < 0)
+ dev_err(fg->dev, "failed to set display clock rate: %d\n", ret);
+}
+
+static inline void dc_fg_displaymode(struct dc_fg *fg, enum dc_fg_dm mode)
+{
+ regmap_write_bits(fg->reg, FGINCTRL, FGDM_MASK, mode);
+}
+
+static inline void dc_fg_panic_displaymode(struct dc_fg *fg, enum dc_fg_dm mode)
+{
+ regmap_write_bits(fg->reg, FGINCTRLPANIC, FGDM_MASK, mode);
+}
+
+void dc_fg_enable(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGENABLE, FGEN);
+}
+
+void dc_fg_disable(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGENABLE, 0);
+}
+
+void dc_fg_shdtokgen(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGSLR, SHDTOKGEN);
+}
+
+u32 dc_fg_get_frame_index(struct dc_fg *fg)
+{
+ u32 val;
+
+ regmap_read(fg->reg, FGTIMESTAMP, &val);
+
+ return FRAMEINDEX(val);
+}
+
+u32 dc_fg_get_line_index(struct dc_fg *fg)
+{
+ u32 val;
+
+ regmap_read(fg->reg, FGTIMESTAMP, &val);
+
+ return LINEINDEX(val);
+}
+
+bool dc_fg_wait_for_frame_index_moving(struct dc_fg *fg)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ u32 frame_index, last_frame_index;
+
+ frame_index = dc_fg_get_frame_index(fg);
+ do {
+ last_frame_index = frame_index;
+ frame_index = dc_fg_get_frame_index(fg);
+ } while (last_frame_index == frame_index &&
+ time_before(jiffies, timeout));
+
+ return last_frame_index != frame_index;
+}
+
+bool dc_fg_secondary_requests_to_read_empty_fifo(struct dc_fg *fg)
+{
+ u32 val;
+
+ regmap_read(fg->reg, FGCHSTAT, &val);
+
+ return !!(val & SFIFOEMPTY);
+}
+
+void dc_fg_secondary_clear_channel_status(struct dc_fg *fg)
+{
+ regmap_write(fg->reg, FGCHSTATCLR, CLRSECSTAT);
+}
+
+int dc_fg_wait_for_secondary_syncup(struct dc_fg *fg)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout(fg->reg, FGCHSTAT, val,
+ val & SECSYNCSTAT, 5, 100000);
+}
+
+void dc_fg_enable_clock(struct dc_fg *fg)
+{
+ int ret;
+
+ ret = clk_prepare_enable(fg->clk_disp);
+ if (ret)
+ dev_err(fg->dev, "failed to enable display clock: %d\n", ret);
+}
+
+void dc_fg_disable_clock(struct dc_fg *fg)
+{
+ clk_disable_unprepare(fg->clk_disp);
+}
+
+enum drm_mode_status dc_fg_check_clock(struct dc_fg *fg, int clk_khz)
+{
+ unsigned long rounded_rate;
+
+ rounded_rate = clk_round_rate(fg->clk_disp, clk_khz * HZ_PER_KHZ);
+
+ if (rounded_rate != clk_khz * HZ_PER_KHZ)
+ return MODE_NOCLOCK;
+
+ return MODE_OK;
+}
+
+void dc_fg_init(struct dc_fg *fg)
+{
+ dc_fg_enable_shden(fg);
+ dc_fg_syncmode(fg, FG_SYNCMODE_OFF);
+ dc_fg_displaymode(fg, FG_DM_SEC_ON_TOP);
+ dc_fg_panic_displaymode(fg, FG_DM_CONSTCOL);
+}
+
+static int dc_fg_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res;
+ void __iomem *base;
+ struct dc_fg *fg;
+ int id;
+
+ fg = devm_kzalloc(dev, sizeof(*fg), GFP_KERNEL);
+ if (!fg)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ fg->reg = devm_regmap_init_mmio(dev, base, &dc_fg_regmap_config);
+ if (IS_ERR(fg->reg))
+ return PTR_ERR(fg->reg);
+
+ fg->clk_disp = devm_clk_get(dev, NULL);
+ if (IS_ERR(fg->clk_disp))
+ return dev_err_probe(dev, PTR_ERR(fg->clk_disp),
+ "failed to get display clock\n");
+
+ id = dc_subdev_get_id(dc_fg_info, ARRAY_SIZE(dc_fg_info), res);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ fg->dev = dev;
+ dc_drm->fg[id] = fg;
+
+ return 0;
+}
+
+static const struct component_ops dc_fg_ops = {
+ .bind = dc_fg_bind,
+};
+
+static int dc_fg_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_fg_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_fg_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_fg_ops);
+}
+
+static const struct of_device_id dc_fg_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-framegen" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_fg_dt_ids);
+
+struct platform_driver dc_fg_driver = {
+ .probe = dc_fg_probe,
+ .remove = dc_fg_remove,
+ .driver = {
+ .name = "imx8-dc-framegen",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_fg_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-fl.c b/drivers/gpu/drm/imx/dc/dc-fl.c
new file mode 100644
index 000000000000..3ce24c72aa13
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fl.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/ioport.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+
+#define BASEADDRESS(x) (0x10 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERATTRIBUTES(x) (0x14 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERDIMENSION(x) (0x18 + FRAC_OFFSET * (x))
+#define COLORCOMPONENTBITS(x) (0x1c + FRAC_OFFSET * (x))
+#define COLORCOMPONENTSHIFT(x) (0x20 + FRAC_OFFSET * (x))
+#define LAYEROFFSET(x) (0x24 + FRAC_OFFSET * (x))
+#define CLIPWINDOWOFFSET(x) (0x28 + FRAC_OFFSET * (x))
+#define CLIPWINDOWDIMENSIONS(x) (0x2c + FRAC_OFFSET * (x))
+#define CONSTANTCOLOR(x) (0x30 + FRAC_OFFSET * (x))
+#define LAYERPROPERTY(x) (0x34 + FRAC_OFFSET * (x))
+#define FRAMEDIMENSIONS 0x150
+
+struct dc_fl {
+ struct dc_fu fu;
+};
+
+static const struct dc_subdev_info dc_fl_info[] = {
+ { .reg_start = 0x56180ac0, .id = 0, },
+};
+
+static const struct regmap_range dc_fl_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, FRAMEDIMENSIONS),
+};
+
+static const struct regmap_access_table dc_fl_regmap_access_table = {
+ .yes_ranges = dc_fl_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fl_regmap_ranges),
+};
+
+static const struct regmap_config dc_fl_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fl_regmap_access_table,
+ .rd_table = &dc_fl_regmap_access_table,
+ .max_register = FRAMEDIMENSIONS,
+};
+
+static void dc_fl_set_fmt(struct dc_fu *fu, enum dc_fu_frac frac,
+ const struct drm_format_info *format)
+{
+ u32 bits = 0, shifts = 0;
+
+ dc_fu_set_src_bpp(fu, frac, format->cpp[0] * 8);
+
+ regmap_write_bits(fu->reg_cfg, LAYERPROPERTY(frac),
+ YUVCONVERSIONMODE_MASK,
+ YUVCONVERSIONMODE(YUVCONVERSIONMODE_OFF));
+
+ dc_fu_get_pixel_format_bits(fu, format->format, &bits);
+ dc_fu_get_pixel_format_shifts(fu, format->format, &shifts);
+
+ regmap_write(fu->reg_cfg, COLORCOMPONENTBITS(frac), bits);
+ regmap_write(fu->reg_cfg, COLORCOMPONENTSHIFT(frac), shifts);
+}
+
+static void dc_fl_set_framedimensions(struct dc_fu *fu, int w, int h)
+{
+ regmap_write(fu->reg_cfg, FRAMEDIMENSIONS,
+ FRAMEWIDTH(w) | FRAMEHEIGHT(h));
+}
+
+static void dc_fl_init(struct dc_fu *fu)
+{
+ dc_fu_common_hw_init(fu);
+ dc_fu_shdldreq_sticky(fu, 0xff);
+}
+
+static void dc_fl_set_ops(struct dc_fu *fu)
+{
+ memcpy(&fu->ops, &dc_fu_common_ops, sizeof(dc_fu_common_ops));
+ fu->ops.init = dc_fl_init;
+ fu->ops.set_fmt = dc_fl_set_fmt;
+ fu->ops.set_framedimensions = dc_fl_set_framedimensions;
+}
+
+static int dc_fl_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_cfg;
+ struct dc_fl *fl;
+ struct dc_fu *fu;
+ int i, id;
+
+ fl = devm_kzalloc(dev, sizeof(*fl), GFP_KERNEL);
+ if (!fl)
+ return -ENOMEM;
+
+ fu = &fl->fu;
+
+ res_pec = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ fu->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_fl_cfg_regmap_config);
+ if (IS_ERR(fu->reg_cfg))
+ return PTR_ERR(fu->reg_cfg);
+
+ id = dc_subdev_get_id(dc_fl_info, ARRAY_SIZE(dc_fl_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ fu->link_id = LINK_ID_FETCHLAYER0;
+ fu->id = DC_FETCHUNIT_FL0;
+ for (i = 0; i < DC_FETCHUNIT_FRAC_NUM; i++) {
+ fu->reg_baseaddr[i] = BASEADDRESS(i);
+ fu->reg_sourcebufferattributes[i] = SOURCEBUFFERATTRIBUTES(i);
+ fu->reg_sourcebufferdimension[i] = SOURCEBUFFERDIMENSION(i);
+ fu->reg_layeroffset[i] = LAYEROFFSET(i);
+ fu->reg_clipwindowoffset[i] = CLIPWINDOWOFFSET(i);
+ fu->reg_clipwindowdimensions[i] = CLIPWINDOWDIMENSIONS(i);
+ fu->reg_constantcolor[i] = CONSTANTCOLOR(i);
+ fu->reg_layerproperty[i] = LAYERPROPERTY(i);
+ }
+ snprintf(fu->name, sizeof(fu->name), "FetchLayer%d", id);
+
+ dc_fl_set_ops(fu);
+
+ dc_drm->fu_disp[fu->id] = fu;
+
+ return 0;
+}
+
+static const struct component_ops dc_fl_ops = {
+ .bind = dc_fl_bind,
+};
+
+static int dc_fl_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_fl_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_fl_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_fl_ops);
+}
+
+static const struct of_device_id dc_fl_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-fetchlayer" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_fl_dt_ids);
+
+struct platform_driver dc_fl_driver = {
+ .probe = dc_fl_probe,
+ .remove = dc_fl_remove,
+ .driver = {
+ .name = "imx8-dc-fetchlayer",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_fl_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-fu.c b/drivers/gpu/drm/imx/dc/dc-fu.c
new file mode 100644
index 000000000000..f94c591c8158
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fu.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bits.h>
+#include <linux/math.h>
+
+#include "dc-fu.h"
+#include "dc-pe.h"
+
+/* STATICCONTROL */
+#define SHDLDREQSTICKY_MASK GENMASK(31, 24)
+#define SHDLDREQSTICKY(x) FIELD_PREP(SHDLDREQSTICKY_MASK, (x))
+#define BASEADDRESSAUTOUPDATE_MASK GENMASK(23, 16)
+#define BASEADDRESSAUTOUPDATE(x) FIELD_PREP(BASEADDRESSAUTOUPDATE_MASK, (x))
+
+/* BURSTBUFFERMANAGEMENT */
+#define SETBURSTLENGTH_MASK GENMASK(12, 8)
+#define SETBURSTLENGTH(x) FIELD_PREP(SETBURSTLENGTH_MASK, (x))
+#define SETNUMBUFFERS_MASK GENMASK(7, 0)
+#define SETNUMBUFFERS(x) FIELD_PREP(SETNUMBUFFERS_MASK, (x))
+#define LINEMODE_MASK BIT(31)
+
+/* SOURCEBUFFERATTRIBUTES */
+#define BITSPERPIXEL_MASK GENMASK(21, 16)
+#define BITSPERPIXEL(x) FIELD_PREP(BITSPERPIXEL_MASK, (x))
+#define STRIDE_MASK GENMASK(15, 0)
+#define STRIDE(x) FIELD_PREP(STRIDE_MASK, (x) - 1)
+
+/* SOURCEBUFFERDIMENSION */
+#define LINEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
+#define LINECOUNT(x) FIELD_PREP(GENMASK(29, 16), (x))
+
+/* LAYEROFFSET */
+#define LAYERXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
+#define LAYERYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x))
+
+/* CLIPWINDOWOFFSET */
+#define CLIPWINDOWXOFFSET(x) FIELD_PREP(GENMASK(14, 0), (x))
+#define CLIPWINDOWYOFFSET(x) FIELD_PREP(GENMASK(30, 16), (x))
+
+/* CLIPWINDOWDIMENSIONS */
+#define CLIPWINDOWWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x) - 1)
+#define CLIPWINDOWHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x) - 1)
+
+enum dc_linemode {
+ /*
+ * Mandatory setting for operation in the Display Controller.
+	 * Also works for the Blit Engine, with marginal performance impact.
+ */
+ LINEMODE_DISPLAY = 0,
+};
+
+struct dc_fu_pixel_format {
+ u32 pixel_format;
+ u32 bits;
+ u32 shifts;
+};
+
+static const struct dc_fu_pixel_format pixel_formats[] = {
+ {
+ DRM_FORMAT_XRGB8888,
+ R_BITS(8) | G_BITS(8) | B_BITS(8) | A_BITS(0),
+ R_SHIFT(16) | G_SHIFT(8) | B_SHIFT(0) | A_SHIFT(0),
+ },
+};
+
+void dc_fu_get_pixel_format_bits(struct dc_fu *fu, u32 format, u32 *bits)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ if (pixel_formats[i].pixel_format == format) {
+ *bits = pixel_formats[i].bits;
+ return;
+ }
+ }
+}
+
+void
+dc_fu_get_pixel_format_shifts(struct dc_fu *fu, u32 format, u32 *shifts)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pixel_formats); i++) {
+ if (pixel_formats[i].pixel_format == format) {
+ *shifts = pixel_formats[i].shifts;
+ return;
+ }
+ }
+}
+
+static inline void dc_fu_enable_shden(struct dc_fu *fu)
+{
+ regmap_write_bits(fu->reg_cfg, STATICCONTROL, SHDEN, SHDEN);
+}
+
+static inline void dc_fu_baddr_autoupdate(struct dc_fu *fu, u8 layer_mask)
+{
+ regmap_write_bits(fu->reg_cfg, STATICCONTROL,
+ BASEADDRESSAUTOUPDATE_MASK,
+ BASEADDRESSAUTOUPDATE(layer_mask));
+}
+
+void dc_fu_shdldreq_sticky(struct dc_fu *fu, u8 layer_mask)
+{
+ regmap_write_bits(fu->reg_cfg, STATICCONTROL, SHDLDREQSTICKY_MASK,
+ SHDLDREQSTICKY(layer_mask));
+}
+
+static inline void dc_fu_set_linemode(struct dc_fu *fu, enum dc_linemode mode)
+{
+ regmap_write_bits(fu->reg_cfg, BURSTBUFFERMANAGEMENT, LINEMODE_MASK,
+ mode);
+}
+
+static inline void dc_fu_set_numbuffers(struct dc_fu *fu, unsigned int num)
+{
+ regmap_write_bits(fu->reg_cfg, BURSTBUFFERMANAGEMENT,
+ SETNUMBUFFERS_MASK, SETNUMBUFFERS(num));
+}
+
+static void dc_fu_set_burstlength(struct dc_fu *fu, dma_addr_t baddr)
+{
+ unsigned int burst_size, burst_length;
+
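+	/*
+	 * Derive the burst size from the base address alignment (lowest set
+	 * bit), round it up to a multiple of 8 bytes and cap it at 128 bytes.
+	 * The SETBURSTLENGTH field is programmed in units of 8 bytes.
+	 */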
+ burst_size = 1 << __ffs(baddr);
+ burst_size = round_up(burst_size, 8);
+ burst_size = min(burst_size, 128U);
+ burst_length = burst_size / 8;
+
+ regmap_write_bits(fu->reg_cfg, BURSTBUFFERMANAGEMENT,
+ SETBURSTLENGTH_MASK, SETBURSTLENGTH(burst_length));
+}
+
+static void dc_fu_set_baseaddress(struct dc_fu *fu, enum dc_fu_frac frac,
+ dma_addr_t baddr)
+{
+ regmap_write(fu->reg_cfg, fu->reg_baseaddr[frac], baddr);
+}
+
+void dc_fu_set_src_bpp(struct dc_fu *fu, enum dc_fu_frac frac, unsigned int bpp)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_sourcebufferattributes[frac],
+ BITSPERPIXEL_MASK, BITSPERPIXEL(bpp));
+}
+
+static void dc_fu_set_src_stride(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int stride)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_sourcebufferattributes[frac],
+ STRIDE_MASK, STRIDE(stride));
+}
+
+static void dc_fu_set_src_buf_dimensions(struct dc_fu *fu, enum dc_fu_frac frac,
+ int w, int h)
+{
+ regmap_write(fu->reg_cfg, fu->reg_sourcebufferdimension[frac],
+ LINEWIDTH(w) | LINECOUNT(h));
+}
+
+static inline void dc_fu_layeroffset(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int x, unsigned int y)
+{
+ regmap_write(fu->reg_cfg, fu->reg_layeroffset[frac],
+ LAYERXOFFSET(x) | LAYERYOFFSET(y));
+}
+
+static inline void dc_fu_clipoffset(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int x, unsigned int y)
+{
+ regmap_write(fu->reg_cfg, fu->reg_clipwindowoffset[frac],
+ CLIPWINDOWXOFFSET(x) | CLIPWINDOWYOFFSET(y));
+}
+
+static inline void dc_fu_clipdimensions(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int w, unsigned int h)
+{
+ regmap_write(fu->reg_cfg, fu->reg_clipwindowdimensions[frac],
+ CLIPWINDOWWIDTH(w) | CLIPWINDOWHEIGHT(h));
+}
+
+static inline void
+dc_fu_set_pixel_blend_mode(struct dc_fu *fu, enum dc_fu_frac frac)
+{
+ regmap_write(fu->reg_cfg, fu->reg_layerproperty[frac], 0);
+ regmap_write(fu->reg_cfg, fu->reg_constantcolor[frac], 0);
+}
+
+static void dc_fu_enable_src_buf(struct dc_fu *fu, enum dc_fu_frac frac)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_layerproperty[frac],
+ SOURCEBUFFERENABLE, SOURCEBUFFERENABLE);
+}
+
+static void dc_fu_disable_src_buf(struct dc_fu *fu, enum dc_fu_frac frac)
+{
+ regmap_write_bits(fu->reg_cfg, fu->reg_layerproperty[frac],
+ SOURCEBUFFERENABLE, 0);
+
+ if (fu->lb) {
+ dc_lb_pec_clken(fu->lb, CLKEN_DISABLE);
+ dc_lb_mode(fu->lb, LB_NEUTRAL);
+ }
+}
+
+static void dc_fu_set_layerblend(struct dc_fu *fu, struct dc_lb *lb)
+{
+ fu->lb = lb;
+}
+
+static enum dc_link_id dc_fu_get_link_id(struct dc_fu *fu)
+{
+ return fu->link_id;
+}
+
+static const char *dc_fu_get_name(struct dc_fu *fu)
+{
+ return fu->name;
+}
+
+const struct dc_fu_ops dc_fu_common_ops = {
+ .set_burstlength = dc_fu_set_burstlength,
+ .set_baseaddress = dc_fu_set_baseaddress,
+ .set_src_stride = dc_fu_set_src_stride,
+ .set_src_buf_dimensions = dc_fu_set_src_buf_dimensions,
+ .enable_src_buf = dc_fu_enable_src_buf,
+ .disable_src_buf = dc_fu_disable_src_buf,
+ .set_layerblend = dc_fu_set_layerblend,
+ .get_link_id = dc_fu_get_link_id,
+ .get_name = dc_fu_get_name,
+};
+
+const struct dc_fu_ops *dc_fu_get_ops(struct dc_fu *fu)
+{
+ return &fu->ops;
+}
+
+void dc_fu_common_hw_init(struct dc_fu *fu)
+{
+ enum dc_fu_frac i;
+
+ dc_fu_baddr_autoupdate(fu, 0x0);
+ dc_fu_enable_shden(fu);
+ dc_fu_set_linemode(fu, LINEMODE_DISPLAY);
+ dc_fu_set_numbuffers(fu, 16);
+
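+	/* Put every fractional sub-unit into a known, disabled default state. */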
+ for (i = DC_FETCHUNIT_FRAC0; i < DC_FETCHUNIT_FRAC_NUM; i++) {
+ dc_fu_layeroffset(fu, i, 0, 0);
+ dc_fu_clipoffset(fu, i, 0, 0);
+ dc_fu_clipdimensions(fu, i, 1, 1);
+ dc_fu_disable_src_buf(fu, i);
+ dc_fu_set_pixel_blend_mode(fu, i);
+ }
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-fu.h b/drivers/gpu/drm/imx/dc/dc-fu.h
new file mode 100644
index 000000000000..e016e1ea5b4e
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fu.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_FETCHUNIT_H__
+#define __DC_FETCHUNIT_H__
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/regmap.h>
+#include <linux/types.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "dc-pe.h"
+
+#define FRAC_OFFSET 0x28
+
+#define STATICCONTROL 0x8
+#define BURSTBUFFERMANAGEMENT 0xc
+
+/* COLORCOMPONENTBITS */
+#define R_BITS(x) FIELD_PREP_CONST(GENMASK(27, 24), (x))
+#define G_BITS(x) FIELD_PREP_CONST(GENMASK(19, 16), (x))
+#define B_BITS(x) FIELD_PREP_CONST(GENMASK(11, 8), (x))
+#define A_BITS(x) FIELD_PREP_CONST(GENMASK(3, 0), (x))
+
+/* COLORCOMPONENTSHIFT */
+#define R_SHIFT(x) FIELD_PREP_CONST(GENMASK(28, 24), (x))
+#define G_SHIFT(x) FIELD_PREP_CONST(GENMASK(20, 16), (x))
+#define B_SHIFT(x) FIELD_PREP_CONST(GENMASK(12, 8), (x))
+#define A_SHIFT(x) FIELD_PREP_CONST(GENMASK(4, 0), (x))
+
+/* LAYERPROPERTY */
+#define YUVCONVERSIONMODE_MASK GENMASK(18, 17)
+#define YUVCONVERSIONMODE(x) FIELD_PREP(YUVCONVERSIONMODE_MASK, (x))
+#define SOURCEBUFFERENABLE BIT(31)
+
+/* FRAMEDIMENSIONS */
+#define FRAMEWIDTH(x) FIELD_PREP(GENMASK(13, 0), (x))
+#define FRAMEHEIGHT(x) FIELD_PREP(GENMASK(29, 16), (x))
+
+/* CONTROL */
+#define INPUTSELECT_MASK GENMASK(4, 3)
+#define INPUTSELECT(x) FIELD_PREP(INPUTSELECT_MASK, (x))
+#define RASTERMODE_MASK GENMASK(2, 0)
+#define RASTERMODE(x) FIELD_PREP(RASTERMODE_MASK, (x))
+
+enum dc_yuvconversionmode {
+ YUVCONVERSIONMODE_OFF,
+};
+
+enum dc_inputselect {
+ INPUTSELECT_INACTIVE,
+};
+
+enum dc_rastermode {
+ RASTERMODE_NORMAL,
+};
+
+enum {
+ DC_FETCHUNIT_FL0,
+ DC_FETCHUNIT_FW2,
+};
+
+enum dc_fu_frac {
+ DC_FETCHUNIT_FRAC0,
+ DC_FETCHUNIT_FRAC1,
+ DC_FETCHUNIT_FRAC2,
+ DC_FETCHUNIT_FRAC3,
+ DC_FETCHUNIT_FRAC4,
+ DC_FETCHUNIT_FRAC5,
+ DC_FETCHUNIT_FRAC6,
+ DC_FETCHUNIT_FRAC7,
+ DC_FETCHUNIT_FRAC_NUM
+};
+
+struct dc_fu;
+struct dc_lb;
+
+struct dc_fu_ops {
+ void (*init)(struct dc_fu *fu);
+ void (*set_burstlength)(struct dc_fu *fu, dma_addr_t baddr);
+ void (*set_baseaddress)(struct dc_fu *fu, enum dc_fu_frac frac,
+ dma_addr_t baddr);
+ void (*set_src_stride)(struct dc_fu *fu, enum dc_fu_frac frac,
+ unsigned int stride);
+ void (*set_src_buf_dimensions)(struct dc_fu *fu, enum dc_fu_frac frac,
+ int w, int h);
+ void (*set_fmt)(struct dc_fu *fu, enum dc_fu_frac frac,
+ const struct drm_format_info *format);
+ void (*enable_src_buf)(struct dc_fu *fu, enum dc_fu_frac frac);
+ void (*disable_src_buf)(struct dc_fu *fu, enum dc_fu_frac frac);
+ void (*set_framedimensions)(struct dc_fu *fu, int w, int h);
+ void (*set_layerblend)(struct dc_fu *fu, struct dc_lb *lb);
+ enum dc_link_id (*get_link_id)(struct dc_fu *fu);
+ const char *(*get_name)(struct dc_fu *fu);
+};
+
+struct dc_fu {
+ struct regmap *reg_pec;
+ struct regmap *reg_cfg;
+ char name[21];
+ u32 reg_baseaddr[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_sourcebufferattributes[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_sourcebufferdimension[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_layeroffset[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_clipwindowoffset[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_clipwindowdimensions[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_constantcolor[DC_FETCHUNIT_FRAC_NUM];
+ u32 reg_layerproperty[DC_FETCHUNIT_FRAC_NUM];
+ unsigned int id;
+ enum dc_link_id link_id;
+ struct dc_fu_ops ops;
+ struct dc_lb *lb;
+};
+
+extern const struct dc_fu_ops dc_fu_common_ops;
+
+void dc_fu_get_pixel_format_bits(struct dc_fu *fu, u32 format, u32 *bits);
+void dc_fu_get_pixel_format_shifts(struct dc_fu *fu, u32 format, u32 *shifts);
+void dc_fu_shdldreq_sticky(struct dc_fu *fu, u8 layer_mask);
+void dc_fu_set_src_bpp(struct dc_fu *fu, enum dc_fu_frac frac, unsigned int bpp);
+void dc_fu_common_hw_init(struct dc_fu *fu);
+
+const struct dc_fu_ops *dc_fu_get_ops(struct dc_fu *fu);
+
+#endif /* __DC_FETCHUNIT_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-fw.c b/drivers/gpu/drm/imx/dc/dc-fw.c
new file mode 100644
index 000000000000..acb2d4d9e2ec
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-fw.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_fourcc.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+
+#define PIXENGCFG_DYNAMIC 0x8
+
+#define BASEADDRESS(x) (0x10 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERATTRIBUTES(x) (0x14 + FRAC_OFFSET * (x))
+#define SOURCEBUFFERDIMENSION(x) (0x18 + FRAC_OFFSET * (x))
+#define COLORCOMPONENTBITS(x) (0x1c + FRAC_OFFSET * (x))
+#define COLORCOMPONENTSHIFT(x) (0x20 + FRAC_OFFSET * (x))
+#define LAYEROFFSET(x) (0x24 + FRAC_OFFSET * (x))
+#define CLIPWINDOWOFFSET(x) (0x28 + FRAC_OFFSET * (x))
+#define CLIPWINDOWDIMENSIONS(x) (0x2c + FRAC_OFFSET * (x))
+#define CONSTANTCOLOR(x) (0x30 + FRAC_OFFSET * (x))
+#define LAYERPROPERTY(x) (0x34 + FRAC_OFFSET * (x))
+#define FRAMEDIMENSIONS 0x150
+#define CONTROL 0x170
+
+struct dc_fw {
+ struct dc_fu fu;
+};
+
+static const struct dc_subdev_info dc_fw_info[] = {
+ { .reg_start = 0x56180a60, .id = 2, },
+};
+
+static const struct regmap_range dc_fw_pec_regmap_access_ranges[] = {
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+};
+
+static const struct regmap_access_table dc_fw_pec_regmap_access_table = {
+ .yes_ranges = dc_fw_pec_regmap_access_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fw_pec_regmap_access_ranges),
+};
+
+static const struct regmap_config dc_fw_pec_regmap_config = {
+ .name = "pec",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fw_pec_regmap_access_table,
+ .rd_table = &dc_fw_pec_regmap_access_table,
+ .max_register = PIXENGCFG_DYNAMIC,
+};
+
+static const struct regmap_range dc_fw_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, FRAMEDIMENSIONS),
+ regmap_reg_range(CONTROL, CONTROL),
+};
+
+static const struct regmap_access_table dc_fw_regmap_access_table = {
+ .yes_ranges = dc_fw_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_fw_regmap_ranges),
+};
+
+static const struct regmap_config dc_fw_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_fw_regmap_access_table,
+ .rd_table = &dc_fw_regmap_access_table,
+ .max_register = CONTROL,
+};
+
+static void dc_fw_set_fmt(struct dc_fu *fu, enum dc_fu_frac frac,
+ const struct drm_format_info *format)
+{
+ u32 bits = 0, shifts = 0;
+
+ dc_fu_set_src_bpp(fu, frac, format->cpp[0] * 8);
+
+ regmap_write_bits(fu->reg_cfg, CONTROL, INPUTSELECT_MASK,
+ INPUTSELECT(INPUTSELECT_INACTIVE));
+ regmap_write_bits(fu->reg_cfg, CONTROL, RASTERMODE_MASK,
+ RASTERMODE(RASTERMODE_NORMAL));
+
+ regmap_write_bits(fu->reg_cfg, LAYERPROPERTY(frac),
+ YUVCONVERSIONMODE_MASK,
+ YUVCONVERSIONMODE(YUVCONVERSIONMODE_OFF));
+
+ dc_fu_get_pixel_format_bits(fu, format->format, &bits);
+ dc_fu_get_pixel_format_shifts(fu, format->format, &shifts);
+
+ regmap_write(fu->reg_cfg, COLORCOMPONENTBITS(frac), bits);
+ regmap_write(fu->reg_cfg, COLORCOMPONENTSHIFT(frac), shifts);
+}
+
+static void dc_fw_set_framedimensions(struct dc_fu *fu, int w, int h)
+{
+ regmap_write(fu->reg_cfg, FRAMEDIMENSIONS,
+ FRAMEWIDTH(w) | FRAMEHEIGHT(h));
+}
+
+static void dc_fw_init(struct dc_fu *fu)
+{
+ regmap_write(fu->reg_pec, PIXENGCFG_DYNAMIC, LINK_ID_NONE);
+ dc_fu_common_hw_init(fu);
+ dc_fu_shdldreq_sticky(fu, 0xff);
+}
+
+static void dc_fw_set_ops(struct dc_fu *fu)
+{
+ memcpy(&fu->ops, &dc_fu_common_ops, sizeof(dc_fu_common_ops));
+ fu->ops.init = dc_fw_init;
+ fu->ops.set_fmt = dc_fw_set_fmt;
+ fu->ops.set_framedimensions = dc_fw_set_framedimensions;
+}
+
+static int dc_fw_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_pec;
+ void __iomem *base_cfg;
+ struct dc_fw *fw;
+ struct dc_fu *fu;
+ int i, id;
+
+ fw = devm_kzalloc(dev, sizeof(*fw), GFP_KERNEL);
+ if (!fw)
+ return -ENOMEM;
+
+ fu = &fw->fu;
+
+ base_pec = devm_platform_get_and_ioremap_resource(pdev, 0, &res_pec);
+ if (IS_ERR(base_pec))
+ return PTR_ERR(base_pec);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ fu->reg_pec = devm_regmap_init_mmio(dev, base_pec,
+ &dc_fw_pec_regmap_config);
+ if (IS_ERR(fu->reg_pec))
+ return PTR_ERR(fu->reg_pec);
+
+ fu->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_fw_cfg_regmap_config);
+ if (IS_ERR(fu->reg_cfg))
+ return PTR_ERR(fu->reg_cfg);
+
+ id = dc_subdev_get_id(dc_fw_info, ARRAY_SIZE(dc_fw_info), res_pec);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ fu->link_id = LINK_ID_FETCHWARP2;
+ fu->id = DC_FETCHUNIT_FW2;
+ for (i = 0; i < DC_FETCHUNIT_FRAC_NUM; i++) {
+ fu->reg_baseaddr[i] = BASEADDRESS(i);
+ fu->reg_sourcebufferattributes[i] = SOURCEBUFFERATTRIBUTES(i);
+ fu->reg_sourcebufferdimension[i] = SOURCEBUFFERDIMENSION(i);
+ fu->reg_layeroffset[i] = LAYEROFFSET(i);
+ fu->reg_clipwindowoffset[i] = CLIPWINDOWOFFSET(i);
+ fu->reg_clipwindowdimensions[i] = CLIPWINDOWDIMENSIONS(i);
+ fu->reg_constantcolor[i] = CONSTANTCOLOR(i);
+ fu->reg_layerproperty[i] = LAYERPROPERTY(i);
+ }
+ snprintf(fu->name, sizeof(fu->name), "FetchWarp%d", id);
+
+ dc_fw_set_ops(fu);
+
+ dc_drm->fu_disp[fu->id] = fu;
+
+ return 0;
+}
+
+static const struct component_ops dc_fw_ops = {
+ .bind = dc_fw_bind,
+};
+
+static int dc_fw_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_fw_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_fw_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_fw_ops);
+}
+
+static const struct of_device_id dc_fw_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-fetchwarp" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_fw_dt_ids);
+
+struct platform_driver dc_fw_driver = {
+ .probe = dc_fw_probe,
+ .remove = dc_fw_remove,
+ .driver = {
+ .name = "imx8-dc-fetchwarp",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_fw_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-ic.c b/drivers/gpu/drm/imx/dc/dc-ic.c
new file mode 100644
index 000000000000..a270ae4030cd
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-ic.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#define USERINTERRUPTMASK(n) (0x8 + 4 * (n))
+#define INTERRUPTENABLE(n) (0x10 + 4 * (n))
+#define INTERRUPTPRESET(n) (0x18 + 4 * (n))
+#define INTERRUPTCLEAR(n) (0x20 + 4 * (n))
+#define INTERRUPTSTATUS(n) (0x28 + 4 * (n))
+#define USERINTERRUPTENABLE(n) (0x40 + 4 * (n))
+#define USERINTERRUPTPRESET(n) (0x48 + 4 * (n))
+#define USERINTERRUPTCLEAR(n) (0x50 + 4 * (n))
+#define USERINTERRUPTSTATUS(n) (0x58 + 4 * (n))
+
+#define IRQ_COUNT 49
+#define IRQ_RESERVED 35
+#define REG_NUM 2
+
+struct dc_ic_data {
+ struct regmap *regs;
+ struct clk *clk_axi;
+ int irq[IRQ_COUNT];
+ struct irq_domain *domain;
+};
+
+struct dc_ic_entry {
+ struct dc_ic_data *data;
+ int irq;
+};
+
+static const struct regmap_range dc_ic_regmap_write_ranges[] = {
+ regmap_reg_range(USERINTERRUPTMASK(0), INTERRUPTCLEAR(1)),
+ regmap_reg_range(USERINTERRUPTENABLE(0), USERINTERRUPTCLEAR(1)),
+};
+
+static const struct regmap_access_table dc_ic_regmap_write_table = {
+ .yes_ranges = dc_ic_regmap_write_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ic_regmap_write_ranges),
+};
+
+static const struct regmap_range dc_ic_regmap_read_ranges[] = {
+ regmap_reg_range(USERINTERRUPTMASK(0), INTERRUPTENABLE(1)),
+ regmap_reg_range(INTERRUPTSTATUS(0), INTERRUPTSTATUS(1)),
+ regmap_reg_range(USERINTERRUPTENABLE(0), USERINTERRUPTENABLE(1)),
+ regmap_reg_range(USERINTERRUPTSTATUS(0), USERINTERRUPTSTATUS(1)),
+};
+
+static const struct regmap_access_table dc_ic_regmap_read_table = {
+ .yes_ranges = dc_ic_regmap_read_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ic_regmap_read_ranges),
+};
+
+static const struct regmap_range dc_ic_regmap_volatile_ranges[] = {
+ regmap_reg_range(INTERRUPTPRESET(0), INTERRUPTCLEAR(1)),
+ regmap_reg_range(USERINTERRUPTPRESET(0), USERINTERRUPTCLEAR(1)),
+};
+
+static const struct regmap_access_table dc_ic_regmap_volatile_table = {
+ .yes_ranges = dc_ic_regmap_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_ic_regmap_volatile_ranges),
+};
+
+static const struct regmap_config dc_ic_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_ic_regmap_write_table,
+ .rd_table = &dc_ic_regmap_read_table,
+ .volatile_table = &dc_ic_regmap_volatile_table,
+ .max_register = USERINTERRUPTSTATUS(1),
+};
+
+static void dc_ic_irq_handler(struct irq_desc *desc)
+{
+ struct dc_ic_entry *entry = irq_desc_get_handler_data(desc);
+ struct dc_ic_data *data = entry->data;
+ unsigned int status, enable;
+ unsigned int virq;
+
+ chained_irq_enter(irq_desc_get_chip(desc), desc);
+
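+	/*
+	 * Each chained parent interrupt serves exactly one bit in the user
+	 * interrupt status registers; forward it to the matching interrupt
+	 * in the local domain if it is pending and enabled.
+	 */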
+ regmap_read(data->regs, USERINTERRUPTSTATUS(entry->irq / 32), &status);
+ regmap_read(data->regs, USERINTERRUPTENABLE(entry->irq / 32), &enable);
+
+ status &= enable;
+
+ if (status & BIT(entry->irq % 32)) {
+ virq = irq_find_mapping(data->domain, entry->irq);
+ if (virq)
+ generic_handle_irq(virq);
+ }
+
+ chained_irq_exit(irq_desc_get_chip(desc), desc);
+}
+
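+/*
+ * Bits for interrupt lines without a parent IRQ: the reserved IRQ 35 and
+ * IRQs 49..63, which lie beyond IRQ_COUNT.
+ */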
+static const unsigned long unused_irq[REG_NUM] = {0x00000000, 0xfffe0008};
+
+static int dc_ic_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct irq_chip_generic *gc;
+ struct dc_ic_entry *entry;
+ struct irq_chip_type *ct;
+ struct dc_ic_data *data;
+ void __iomem *base;
+ int i, ret;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ entry = devm_kcalloc(dev, IRQ_COUNT, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+		dev_err(dev, "failed to map registers\n");
+ return PTR_ERR(base);
+ }
+
+ data->regs = devm_regmap_init_mmio(dev, base, &dc_ic_regmap_config);
+ if (IS_ERR(data->regs))
+ return PTR_ERR(data->regs);
+
+ data->clk_axi = devm_clk_get(dev, NULL);
+ if (IS_ERR(data->clk_axi))
+ return dev_err_probe(dev, PTR_ERR(data->clk_axi),
+ "failed to get AXI clock\n");
+
+ for (i = 0; i < IRQ_COUNT; i++) {
+ /* skip the reserved IRQ */
+ if (i == IRQ_RESERVED)
+ continue;
+
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ dev_set_drvdata(dev, data);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to get runtime PM sync: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < REG_NUM; i++) {
+ /* mask and clear all interrupts */
+ regmap_write(data->regs, USERINTERRUPTENABLE(i), 0x0);
+ regmap_write(data->regs, INTERRUPTENABLE(i), 0x0);
+ regmap_write(data->regs, USERINTERRUPTCLEAR(i), 0xffffffff);
+ regmap_write(data->regs, INTERRUPTCLEAR(i), 0xffffffff);
+
+ /* set all interrupts to user mode */
+ regmap_write(data->regs, USERINTERRUPTMASK(i), 0xffffffff);
+ }
+
+ data->domain = irq_domain_add_linear(dev->of_node, IRQ_COUNT,
+ &irq_generic_chip_ops, data);
+ if (!data->domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ pm_runtime_put(dev);
+ return -ENOMEM;
+ }
+ irq_domain_set_pm_device(data->domain, dev);
+
+ ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, "DC",
+ handle_level_irq, 0, 0, 0);
+ if (ret) {
+ dev_err(dev, "failed to alloc generic IRQ chips: %d\n", ret);
+ irq_domain_remove(data->domain);
+ pm_runtime_put(dev);
+ return ret;
+ }
+
+ for (i = 0; i < IRQ_COUNT; i += 32) {
+ gc = irq_get_domain_generic_chip(data->domain, i);
+ gc->reg_base = base;
+ gc->unused = unused_irq[i / 32];
+ ct = gc->chip_types;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->regs.ack = USERINTERRUPTCLEAR(i / 32);
+ ct->regs.mask = USERINTERRUPTENABLE(i / 32);
+ }
+
+ for (i = 0; i < IRQ_COUNT; i++) {
+ /* skip the reserved IRQ */
+ if (i == IRQ_RESERVED)
+ continue;
+
+ data->irq[i] = irq_of_parse_and_map(dev->of_node, i);
+
+ entry[i].data = data;
+ entry[i].irq = i;
+
+ irq_set_chained_handler_and_data(data->irq[i],
+ dc_ic_irq_handler, &entry[i]);
+ }
+
+ return 0;
+}
+
+static void dc_ic_remove(struct platform_device *pdev)
+{
+ struct dc_ic_data *data = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ for (i = 0; i < IRQ_COUNT; i++) {
+ if (i == IRQ_RESERVED)
+ continue;
+
+ irq_set_chained_handler_and_data(data->irq[i], NULL, NULL);
+ }
+
+ irq_domain_remove(data->domain);
+
+ pm_runtime_put_sync(&pdev->dev);
+}
+
+static int dc_ic_runtime_suspend(struct device *dev)
+{
+ struct dc_ic_data *data = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(data->clk_axi);
+
+ return 0;
+}
+
+static int dc_ic_runtime_resume(struct device *dev)
+{
+ struct dc_ic_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(data->clk_axi);
+ if (ret)
+ dev_err(dev, "failed to enable AXI clock: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops dc_ic_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ RUNTIME_PM_OPS(dc_ic_runtime_suspend, dc_ic_runtime_resume, NULL)
+};
+
+static const struct of_device_id dc_ic_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-intc", },
+ { /* sentinel */ }
+};
+
+struct platform_driver dc_ic_driver = {
+ .probe = dc_ic_probe,
+ .remove = dc_ic_remove,
+ .driver = {
+ .name = "imx8-dc-intc",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_ic_dt_ids,
+ .pm = pm_sleep_ptr(&dc_ic_pm_ops),
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-kms.c b/drivers/gpu/drm/imx/dc/dc-kms.c
new file mode 100644
index 000000000000..2b18aa37a4a8
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-kms.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/of.h>
+#include <linux/of_graph.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mode_config.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "dc-de.h"
+#include "dc-drv.h"
+#include "dc-kms.h"
+
+static const struct drm_mode_config_funcs dc_drm_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int dc_kms_init_encoder_per_crtc(struct dc_drm_device *dc_drm,
+ int crtc_index)
+{
+ struct dc_crtc *dc_crtc = &dc_drm->dc_crtc[crtc_index];
+ struct drm_device *drm = &dc_drm->base;
+ struct drm_crtc *crtc = &dc_crtc->base;
+ struct drm_connector *connector;
+ struct device *dev = drm->dev;
+ struct drm_encoder *encoder;
+ struct drm_bridge *bridge;
+ int ret;
+
+ bridge = devm_drm_of_get_bridge(dev, dc_crtc->de->tc->dev->of_node,
+ 0, 0);
+ if (IS_ERR(bridge)) {
+ ret = PTR_ERR(bridge);
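+		/* No bridge means this display output is not wired up. */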
+ if (ret == -ENODEV)
+ return 0;
+
+ return dev_err_probe(dev, ret,
+ "failed to find bridge for CRTC%u\n",
+ crtc->index);
+ }
+
+ encoder = &dc_drm->encoder[crtc_index];
+ ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
+ if (ret) {
+ dev_err(dev, "failed to initialize encoder for CRTC%u: %d\n",
+ crtc->index, ret);
+ return ret;
+ }
+
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = drm_bridge_attach(encoder, bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret) {
+ dev_err(dev,
+ "failed to attach bridge to encoder for CRTC%u: %d\n",
+ crtc->index, ret);
+ return ret;
+ }
+
+ connector = drm_bridge_connector_init(drm, encoder);
+ if (IS_ERR(connector)) {
+ ret = PTR_ERR(connector);
+ dev_err(dev, "failed to init bridge connector for CRTC%u: %d\n",
+ crtc->index, ret);
+ return ret;
+ }
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret)
+ dev_err(dev,
+ "failed to attach encoder to connector for CRTC%u: %d\n",
+ crtc->index, ret);
+
+ return ret;
+}
+
+int dc_kms_init(struct dc_drm_device *dc_drm)
+{
+ struct drm_device *drm = &dc_drm->base;
+ int ret, i;
+
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
+ drm->mode_config.min_width = 60;
+ drm->mode_config.min_height = 60;
+ drm->mode_config.max_width = 8192;
+ drm->mode_config.max_height = 8192;
+ drm->mode_config.funcs = &dc_drm_mode_config_funcs;
+
+ drm->vblank_disable_immediate = true;
+ drm->max_vblank_count = DC_FRAMEGEN_MAX_FRAME_INDEX;
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ ret = dc_crtc_init(dc_drm, i);
+ if (ret)
+ return ret;
+
+ ret = dc_kms_init_encoder_per_crtc(dc_drm, i);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ ret = dc_crtc_post_init(dc_drm, i);
+ if (ret)
+ return ret;
+ }
+
+ ret = drm_vblank_init(drm, DC_DISPLAYS);
+ if (ret) {
+ dev_err(drm->dev, "failed to init vblank support: %d\n", ret);
+ return ret;
+ }
+
+ drm_mode_config_reset(drm);
+
+ drm_kms_helper_poll_init(drm);
+
+ return 0;
+}
+
+void dc_kms_uninit(struct dc_drm_device *dc_drm)
+{
+ drm_kms_helper_poll_fini(&dc_drm->base);
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-kms.h b/drivers/gpu/drm/imx/dc/dc-kms.h
new file mode 100644
index 000000000000..cd7860eff986
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-kms.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_KMS_H__
+#define __DC_KMS_H__
+
+#include <linux/completion.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_vblank.h>
+
+#include "dc-de.h"
+#include "dc-fu.h"
+#include "dc-pe.h"
+
+#define DC_CRTC_IRQS 5
+
+struct dc_crtc_irq {
+ struct dc_crtc *dc_crtc;
+ unsigned int irq;
+};
+
+/**
+ * struct dc_crtc - DC specific drm_crtc
+ *
+ * Each display controller contains one content stream and one safety stream.
+ * In general, the two streams have the same functionality. One stream is
+ * overlaid on the other by @fg. This driver generates a black constant color
+ * from the content stream as the background color, builds plane(s) on the
+ * content stream using layerblend(s), and always generates a constant color
+ * from the safety stream. Note that, thanks to the decoupled timing, the
+ * safety stream keeps showing the constant color properly even when the
+ * content stream has completely hung due to a malfunction of this driver.
+ */
+struct dc_crtc {
+ /** @base: base drm_crtc structure */
+ struct drm_crtc base;
+ /** @de: display engine */
+ struct dc_de *de;
+ /** @cf_cont: content stream constframe */
+ struct dc_cf *cf_cont;
+ /** @cf_safe: safety stream constframe */
+ struct dc_cf *cf_safe;
+ /** @ed_cont: content stream extdst */
+ struct dc_ed *ed_cont;
+ /** @ed_safe: safety stream extdst */
+ struct dc_ed *ed_safe;
+ /** @fg: framegen */
+ struct dc_fg *fg;
+ /**
+ * @irq_dec_framecomplete:
+ *
+ * display engine configuration frame complete interrupt
+ */
+ unsigned int irq_dec_framecomplete;
+ /**
+ * @irq_dec_seqcomplete:
+ *
+ * display engine configuration sequence complete interrupt
+ */
+ unsigned int irq_dec_seqcomplete;
+ /**
+ * @irq_dec_shdload:
+ *
+ * display engine configuration shadow load interrupt
+ */
+ unsigned int irq_dec_shdload;
+ /**
+ * @irq_ed_cont_shdload:
+ *
+ * content stream extdst shadow load interrupt
+ */
+ unsigned int irq_ed_cont_shdload;
+ /**
+ * @irq_ed_safe_shdload:
+ *
+ * safety stream extdst shadow load interrupt
+ */
+ unsigned int irq_ed_safe_shdload;
+ /**
+ * @dec_seqcomplete_done:
+ *
+ * display engine configuration sequence completion
+ */
+ struct completion dec_seqcomplete_done;
+ /**
+ * @dec_shdload_done:
+ *
+ * display engine configuration shadow load completion
+ */
+ struct completion dec_shdload_done;
+ /**
+ * @ed_cont_shdload_done:
+ *
+ * content stream extdst shadow load completion
+ */
+ struct completion ed_cont_shdload_done;
+ /**
+ * @ed_safe_shdload_done:
+ *
+ * safety stream extdst shadow load completion
+ */
+ struct completion ed_safe_shdload_done;
+ /** @event: cached pending vblank event */
+ struct drm_pending_vblank_event *event;
+ /** @irqs: interrupt list */
+ struct dc_crtc_irq irqs[DC_CRTC_IRQS];
+};
+
+/**
+ * struct dc_plane - DC specific drm_plane
+ *
+ * Build a plane on the content stream with a fetchunit and a layerblend.
+ */
+struct dc_plane {
+ /** @base: base drm_plane structure */
+ struct drm_plane base;
+ /** @fu: fetchunit */
+ struct dc_fu *fu;
+ /** @cf: content stream constframe */
+ struct dc_cf *cf;
+ /** @lb: layerblend */
+ struct dc_lb *lb;
+ /** @ed: content stream extdst */
+ struct dc_ed *ed;
+};
+
+#endif /* __DC_KMS_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-lb.c b/drivers/gpu/drm/imx/dc/dc-lb.c
new file mode 100644
index 000000000000..38f966625d38
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-lb.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_blend.h>
+
+#include "dc-drv.h"
+#include "dc-pe.h"
+
+#define PIXENGCFG_DYNAMIC 0x8
+#define PIXENGCFG_DYNAMIC_PRIM_SEL_MASK GENMASK(5, 0)
+#define PIXENGCFG_DYNAMIC_PRIM_SEL(x) \
+ FIELD_PREP(PIXENGCFG_DYNAMIC_PRIM_SEL_MASK, (x))
+#define PIXENGCFG_DYNAMIC_SEC_SEL_MASK GENMASK(13, 8)
+#define PIXENGCFG_DYNAMIC_SEC_SEL(x) \
+ FIELD_PREP(PIXENGCFG_DYNAMIC_SEC_SEL_MASK, (x))
+
+#define STATICCONTROL 0x8
+#define SHDTOKSEL_MASK GENMASK(4, 3)
+#define SHDTOKSEL(x) FIELD_PREP(SHDTOKSEL_MASK, (x))
+#define SHDLDSEL_MASK GENMASK(2, 1)
+#define SHDLDSEL(x) FIELD_PREP(SHDLDSEL_MASK, (x))
+
+#define CONTROL 0xc
+#define CTRL_MODE_MASK BIT(0)
+#define CTRL_MODE(x) FIELD_PREP(CTRL_MODE_MASK, (x))
+
+#define BLENDCONTROL 0x10
+#define ALPHA_MASK GENMASK(23, 16)
+#define ALPHA(x) FIELD_PREP(ALPHA_MASK, (x))
+#define PRIM_C_BLD_FUNC_MASK GENMASK(2, 0)
+#define PRIM_C_BLD_FUNC(x) \
+ FIELD_PREP(PRIM_C_BLD_FUNC_MASK, (x))
+#define SEC_C_BLD_FUNC_MASK GENMASK(6, 4)
+#define SEC_C_BLD_FUNC(x) \
+ FIELD_PREP(SEC_C_BLD_FUNC_MASK, (x))
+#define PRIM_A_BLD_FUNC_MASK GENMASK(10, 8)
+#define PRIM_A_BLD_FUNC(x) \
+ FIELD_PREP(PRIM_A_BLD_FUNC_MASK, (x))
+#define SEC_A_BLD_FUNC_MASK GENMASK(14, 12)
+#define SEC_A_BLD_FUNC(x) \
+ FIELD_PREP(SEC_A_BLD_FUNC_MASK, (x))
+
+#define POSITION 0x14
+#define XPOS_MASK GENMASK(15, 0)
+#define XPOS(x) FIELD_PREP(XPOS_MASK, (x))
+#define YPOS_MASK GENMASK(31, 16)
+#define YPOS(x) FIELD_PREP(YPOS_MASK, (x))
+
+enum dc_lb_blend_func {
+ DC_LAYERBLEND_BLEND_ZERO,
+ DC_LAYERBLEND_BLEND_ONE,
+ DC_LAYERBLEND_BLEND_PRIM_ALPHA,
+ DC_LAYERBLEND_BLEND_ONE_MINUS_PRIM_ALPHA,
+ DC_LAYERBLEND_BLEND_SEC_ALPHA,
+ DC_LAYERBLEND_BLEND_ONE_MINUS_SEC_ALPHA,
+ DC_LAYERBLEND_BLEND_CONST_ALPHA,
+ DC_LAYERBLEND_BLEND_ONE_MINUS_CONST_ALPHA,
+};
+
+enum dc_lb_shadow_sel {
+ BOTH = 0x2,
+};
+
+static const struct dc_subdev_info dc_lb_info[] = {
+ { .reg_start = 0x56180ba0, .id = 0, },
+ { .reg_start = 0x56180bc0, .id = 1, },
+ { .reg_start = 0x56180be0, .id = 2, },
+ { .reg_start = 0x56180c00, .id = 3, },
+};
+
+static const struct regmap_range dc_lb_pec_regmap_access_ranges[] = {
+ regmap_reg_range(PIXENGCFG_DYNAMIC, PIXENGCFG_DYNAMIC),
+};
+
+static const struct regmap_access_table dc_lb_pec_regmap_access_table = {
+ .yes_ranges = dc_lb_pec_regmap_access_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_lb_pec_regmap_access_ranges),
+};
+
+static const struct regmap_config dc_lb_pec_regmap_config = {
+ .name = "pec",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_lb_pec_regmap_access_table,
+ .rd_table = &dc_lb_pec_regmap_access_table,
+ .max_register = PIXENGCFG_DYNAMIC,
+};
+
+static const struct regmap_range dc_lb_regmap_ranges[] = {
+ regmap_reg_range(STATICCONTROL, POSITION),
+};
+
+static const struct regmap_access_table dc_lb_regmap_access_table = {
+ .yes_ranges = dc_lb_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_lb_regmap_ranges),
+};
+
+static const struct regmap_config dc_lb_cfg_regmap_config = {
+ .name = "cfg",
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_lb_regmap_access_table,
+ .rd_table = &dc_lb_regmap_access_table,
+ .max_register = POSITION,
+};
+
+static const enum dc_link_id prim_sels[] = {
+ /* common options */
+ LINK_ID_NONE,
+ LINK_ID_CONSTFRAME0,
+ LINK_ID_CONSTFRAME1,
+ LINK_ID_CONSTFRAME4,
+ LINK_ID_CONSTFRAME5,
+ /*
+	 * special options:
+	 * layerblend(n) has n special options, namely
+	 * layerblend0 to layerblend(n - 1).  For example,
+	 * layerblend3 has the 3 special options
+	 * layerblend0/1/2.
+ */
+ LINK_ID_LAYERBLEND0,
+ LINK_ID_LAYERBLEND1,
+ LINK_ID_LAYERBLEND2,
+ LINK_ID_LAYERBLEND3,
+};
+
+static const enum dc_link_id sec_sels[] = {
+ LINK_ID_NONE,
+ LINK_ID_FETCHWARP2,
+ LINK_ID_FETCHLAYER0,
+};
+
+enum dc_link_id dc_lb_get_link_id(struct dc_lb *lb)
+{
+ return lb->link;
+}
+
+void dc_lb_pec_dynamic_prim_sel(struct dc_lb *lb, enum dc_link_id prim)
+{
+ int fixed_sels_num = ARRAY_SIZE(prim_sels) - 4;
+ int i;
+
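+	/*
+	 * The last four prim_sels[] entries are the per-instance LayerBlend
+	 * options; layerblend(id) may only select layerblend0..(id - 1), so
+	 * limit the search accordingly.
+	 */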
+ for (i = 0; i < fixed_sels_num + lb->id; i++) {
+ if (prim_sels[i] == prim) {
+ regmap_write_bits(lb->reg_pec, PIXENGCFG_DYNAMIC,
+ PIXENGCFG_DYNAMIC_PRIM_SEL_MASK,
+ PIXENGCFG_DYNAMIC_PRIM_SEL(prim));
+ return;
+ }
+ }
+
+ dev_warn(lb->dev, "invalid primary input selection:%d\n", prim);
+}
+
+void dc_lb_pec_dynamic_sec_sel(struct dc_lb *lb, enum dc_link_id sec)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sec_sels); i++) {
+ if (sec_sels[i] == sec) {
+ regmap_write_bits(lb->reg_pec, PIXENGCFG_DYNAMIC,
+ PIXENGCFG_DYNAMIC_SEC_SEL_MASK,
+ PIXENGCFG_DYNAMIC_SEC_SEL(sec));
+ return;
+ }
+ }
+
+ dev_warn(lb->dev, "invalid secondary input selection:%d\n", sec);
+}
+
+void dc_lb_pec_clken(struct dc_lb *lb, enum dc_pec_clken clken)
+{
+ regmap_write_bits(lb->reg_pec, PIXENGCFG_DYNAMIC, CLKEN_MASK,
+ CLKEN(clken));
+}
+
+static inline void dc_lb_enable_shden(struct dc_lb *lb)
+{
+ regmap_write_bits(lb->reg_cfg, STATICCONTROL, SHDEN, SHDEN);
+}
+
+static inline void dc_lb_shdtoksel(struct dc_lb *lb, enum dc_lb_shadow_sel sel)
+{
+ regmap_write_bits(lb->reg_cfg, STATICCONTROL, SHDTOKSEL_MASK,
+ SHDTOKSEL(sel));
+}
+
+static inline void dc_lb_shdldsel(struct dc_lb *lb, enum dc_lb_shadow_sel sel)
+{
+ regmap_write_bits(lb->reg_cfg, STATICCONTROL, SHDLDSEL_MASK,
+ SHDLDSEL(sel));
+}
+
+void dc_lb_mode(struct dc_lb *lb, enum dc_lb_mode mode)
+{
+ regmap_write_bits(lb->reg_cfg, CONTROL, CTRL_MODE_MASK, mode);
+}
+
+static inline void dc_lb_blendcontrol(struct dc_lb *lb)
+{
+ u32 val = PRIM_A_BLD_FUNC(DC_LAYERBLEND_BLEND_ZERO) |
+ SEC_A_BLD_FUNC(DC_LAYERBLEND_BLEND_ZERO) |
+ PRIM_C_BLD_FUNC(DC_LAYERBLEND_BLEND_ZERO) |
+ SEC_C_BLD_FUNC(DC_LAYERBLEND_BLEND_CONST_ALPHA) |
+ ALPHA(DRM_BLEND_ALPHA_OPAQUE >> 8);
+
+ regmap_write(lb->reg_cfg, BLENDCONTROL, val);
+}
+
+void dc_lb_position(struct dc_lb *lb, int x, int y)
+{
+ regmap_write(lb->reg_cfg, POSITION, XPOS(x) | YPOS(y));
+}
+
+int dc_lb_get_id(struct dc_lb *lb)
+{
+ return lb->id;
+}
+
+void dc_lb_init(struct dc_lb *lb)
+{
+ dc_lb_pec_dynamic_prim_sel(lb, LINK_ID_NONE);
+ dc_lb_pec_dynamic_sec_sel(lb, LINK_ID_NONE);
+ dc_lb_pec_clken(lb, CLKEN_DISABLE);
+ dc_lb_shdldsel(lb, BOTH);
+ dc_lb_shdtoksel(lb, BOTH);
+ dc_lb_blendcontrol(lb);
+ dc_lb_enable_shden(lb);
+}
+
+static int dc_lb_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res_pec;
+ void __iomem *base_pec;
+ void __iomem *base_cfg;
+ struct dc_lb *lb;
+
+ lb = devm_kzalloc(dev, sizeof(*lb), GFP_KERNEL);
+ if (!lb)
+ return -ENOMEM;
+
+ base_pec = devm_platform_get_and_ioremap_resource(pdev, 0, &res_pec);
+ if (IS_ERR(base_pec))
+ return PTR_ERR(base_pec);
+
+ base_cfg = devm_platform_ioremap_resource_byname(pdev, "cfg");
+ if (IS_ERR(base_cfg))
+ return PTR_ERR(base_cfg);
+
+ lb->reg_pec = devm_regmap_init_mmio(dev, base_pec,
+ &dc_lb_pec_regmap_config);
+ if (IS_ERR(lb->reg_pec))
+ return PTR_ERR(lb->reg_pec);
+
+ lb->reg_cfg = devm_regmap_init_mmio(dev, base_cfg,
+ &dc_lb_cfg_regmap_config);
+ if (IS_ERR(lb->reg_cfg))
+ return PTR_ERR(lb->reg_cfg);
+
+ lb->id = dc_subdev_get_id(dc_lb_info, ARRAY_SIZE(dc_lb_info), res_pec);
+ if (lb->id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", lb->id);
+ return lb->id;
+ }
+
+ lb->dev = dev;
+ lb->link = LINK_ID_LAYERBLEND0 + lb->id;
+
+ dc_drm->lb[lb->id] = lb;
+
+ return 0;
+}
+
+static const struct component_ops dc_lb_ops = {
+ .bind = dc_lb_bind,
+};
+
+static int dc_lb_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_lb_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_lb_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_lb_ops);
+}
+
+static const struct of_device_id dc_lb_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-layerblend" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_lb_dt_ids);
+
+struct platform_driver dc_lb_driver = {
+ .probe = dc_lb_probe,
+ .remove = dc_lb_remove,
+ .driver = {
+ .name = "imx8-dc-layerblend",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_lb_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-pe.c b/drivers/gpu/drm/imx/dc/dc-pe.c
new file mode 100644
index 000000000000..6676c22f3f45
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-pe.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+#include "dc-pe.h"
+
+static int dc_pe_bind(struct device *dev, struct device *master, void *data)
+{
+ struct dc_drm_device *dc_drm = data;
+ struct dc_pe *pe;
+ int ret;
+
+ pe = devm_kzalloc(dev, sizeof(*pe), GFP_KERNEL);
+ if (!pe)
+ return -ENOMEM;
+
+ pe->clk_axi = devm_clk_get(dev, NULL);
+ if (IS_ERR(pe->clk_axi))
+ return dev_err_probe(dev, PTR_ERR(pe->clk_axi),
+ "failed to get AXI clock\n");
+
+ pe->dev = dev;
+
+ dev_set_drvdata(dev, pe);
+
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ dc_drm->pe = pe;
+
+ return 0;
+}
+
+/*
+ * It's possible to get the child device pointers from the child component
+ * bind callbacks, but that would rely on the component helper binding the
+ * pixel engine component first. To avoid the ordering dependency, fetch the
+ * pointers from dc_drm after all the components are bound.
+ */
+void dc_pe_post_bind(struct dc_drm_device *dc_drm)
+{
+ struct dc_pe *pe = dc_drm->pe;
+ int i;
+
+ for (i = 0; i < DC_DISPLAYS; i++) {
+ pe->cf_safe[i] = dc_drm->cf_safe[i];
+ pe->cf_cont[i] = dc_drm->cf_cont[i];
+ pe->ed_safe[i] = dc_drm->ed_safe[i];
+ pe->ed_cont[i] = dc_drm->ed_cont[i];
+ }
+
+ for (i = 0; i < DC_DISP_FU_CNT; i++)
+ pe->fu_disp[i] = dc_drm->fu_disp[i];
+
+ for (i = 0; i < DC_LB_CNT; i++)
+ pe->lb[i] = dc_drm->lb[i];
+}
+
+static const struct component_ops dc_pe_ops = {
+ .bind = dc_pe_bind,
+};
+
+static int dc_pe_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = component_add(&pdev->dev, &dc_pe_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_pe_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_pe_ops);
+}
+
+static int dc_pe_runtime_suspend(struct device *dev)
+{
+ struct dc_pe *pe = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(pe->clk_axi);
+
+ return 0;
+}
+
+static int dc_pe_runtime_resume(struct device *dev)
+{
+ struct dc_pe *pe = dev_get_drvdata(dev);
+ int i, ret;
+
+ ret = clk_prepare_enable(pe->clk_axi);
+ if (ret) {
+ dev_err(dev, "failed to enable AXI clock: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pe->cf_safe); i++)
+ dc_cf_init(pe->cf_safe[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->cf_cont); i++)
+ dc_cf_init(pe->cf_cont[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->ed_safe); i++)
+ dc_ed_init(pe->ed_safe[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->ed_cont); i++)
+ dc_ed_init(pe->ed_cont[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->fu_disp); i++)
+ pe->fu_disp[i]->ops.init(pe->fu_disp[i]);
+
+ for (i = 0; i < ARRAY_SIZE(pe->lb); i++)
+ dc_lb_init(pe->lb[i]);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dc_pe_pm_ops = {
+ RUNTIME_PM_OPS(dc_pe_runtime_suspend, dc_pe_runtime_resume, NULL)
+};
+
+static const struct of_device_id dc_pe_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-pixel-engine", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_pe_dt_ids);
+
+struct platform_driver dc_pe_driver = {
+ .probe = dc_pe_probe,
+ .remove = dc_pe_remove,
+ .driver = {
+ .name = "imx8-dc-pixel-engine",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_pe_dt_ids,
+ .pm = pm_sleep_ptr(&dc_pe_pm_ops),
+ },
+};
diff --git a/drivers/gpu/drm/imx/dc/dc-pe.h b/drivers/gpu/drm/imx/dc/dc-pe.h
new file mode 100644
index 000000000000..f5e01a6eb9e9
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-pe.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2024 NXP
+ */
+
+#ifndef __DC_PIXEL_ENGINE_H__
+#define __DC_PIXEL_ENGINE_H__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/regmap.h>
+
+#include "dc-de.h"
+
+#define SHDEN BIT(0)
+
+#define CLKEN_MASK_SHIFT 24
+#define CLKEN_MASK (0x3 << CLKEN_MASK_SHIFT)
+#define CLKEN(n) ((n) << CLKEN_MASK_SHIFT)
+
+#define DC_DISP_FU_CNT 2
+#define DC_LB_CNT 4
+
+enum dc_link_id {
+ LINK_ID_NONE = 0x00,
+ LINK_ID_CONSTFRAME0 = 0x0c,
+ LINK_ID_CONSTFRAME4 = 0x0e,
+ LINK_ID_CONSTFRAME1 = 0x10,
+ LINK_ID_CONSTFRAME5 = 0x12,
+ LINK_ID_FETCHWARP2 = 0x14,
+ LINK_ID_FETCHLAYER0 = 0x1a,
+ LINK_ID_LAYERBLEND0 = 0x21,
+ LINK_ID_LAYERBLEND1 = 0x22,
+ LINK_ID_LAYERBLEND2 = 0x23,
+ LINK_ID_LAYERBLEND3 = 0x24,
+};
+
+enum dc_lb_mode {
+	LB_NEUTRAL,	/* Output is the same as the primary input. */
+ LB_BLEND,
+};
+
+enum dc_pec_clken {
+ CLKEN_DISABLE,
+ CLKEN_AUTOMATIC,
+};
+
+struct dc_cf {
+ struct regmap *reg_cfg;
+ enum dc_link_id link;
+};
+
+struct dc_ed {
+ struct device *dev;
+ struct regmap *reg_pec;
+ struct regmap *reg_cfg;
+ int irq_shdload;
+};
+
+struct dc_lb {
+ struct device *dev;
+ struct regmap *reg_pec;
+ struct regmap *reg_cfg;
+ int id;
+ enum dc_link_id link;
+};
+
+struct dc_pe {
+ struct device *dev;
+ struct clk *clk_axi;
+ struct dc_cf *cf_safe[DC_DISPLAYS];
+ struct dc_cf *cf_cont[DC_DISPLAYS];
+ struct dc_ed *ed_safe[DC_DISPLAYS];
+ struct dc_ed *ed_cont[DC_DISPLAYS];
+ struct dc_fu *fu_disp[DC_DISP_FU_CNT];
+ struct dc_lb *lb[DC_LB_CNT];
+};
+
+/* Constant Frame Unit */
+enum dc_link_id dc_cf_get_link_id(struct dc_cf *cf);
+void dc_cf_framedimensions(struct dc_cf *cf, unsigned int w, unsigned int h);
+void dc_cf_constantcolor_black(struct dc_cf *cf);
+void dc_cf_constantcolor_blue(struct dc_cf *cf);
+void dc_cf_init(struct dc_cf *cf);
+
+/* External Destination Unit */
+void dc_ed_pec_src_sel(struct dc_ed *ed, enum dc_link_id src);
+void dc_ed_pec_sync_trigger(struct dc_ed *ed);
+void dc_ed_init(struct dc_ed *ed);
+
+/* Layer Blend Unit */
+enum dc_link_id dc_lb_get_link_id(struct dc_lb *lb);
+void dc_lb_pec_dynamic_prim_sel(struct dc_lb *lb, enum dc_link_id prim);
+void dc_lb_pec_dynamic_sec_sel(struct dc_lb *lb, enum dc_link_id sec);
+void dc_lb_pec_clken(struct dc_lb *lb, enum dc_pec_clken clken);
+void dc_lb_mode(struct dc_lb *lb, enum dc_lb_mode mode);
+void dc_lb_position(struct dc_lb *lb, int x, int y);
+int dc_lb_get_id(struct dc_lb *lb);
+void dc_lb_init(struct dc_lb *lb);
+
+#endif /* __DC_PIXEL_ENGINE_H__ */
diff --git a/drivers/gpu/drm/imx/dc/dc-plane.c b/drivers/gpu/drm/imx/dc/dc-plane.c
new file mode 100644
index 000000000000..d8b946fb90de
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-plane.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/container_of.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_dma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
+
+#include "dc-drv.h"
+#include "dc-fu.h"
+#include "dc-kms.h"
+
+#define DC_PLANE_MAX_PITCH 0x10000
+#define DC_PLANE_MAX_PIX_CNT 8192
+
+#define dc_plane_dbg(plane, fmt, ...) \
+do { \
+ struct drm_plane *_plane = (plane); \
+ drm_dbg_kms(_plane->dev, "[PLANE:%d:%s] " fmt, \
+ _plane->base.id, _plane->name, ##__VA_ARGS__); \
+} while (0)
+
+static const uint32_t dc_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+};
+
+static const struct drm_plane_funcs dc_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_plane_cleanup,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static inline struct dc_plane *to_dc_plane(struct drm_plane *plane)
+{
+ return container_of(plane, struct dc_plane, base);
+}
+
+static int dc_plane_check_max_source_resolution(struct drm_plane_state *state)
+{
+ int src_h = drm_rect_height(&state->src) >> 16;
+ int src_w = drm_rect_width(&state->src) >> 16;
+
+ if (src_w > DC_PLANE_MAX_PIX_CNT || src_h > DC_PLANE_MAX_PIX_CNT) {
+ dc_plane_dbg(state->plane, "invalid source resolution\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dc_plane_check_fb(struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ dma_addr_t baseaddr = drm_fb_dma_get_gem_addr(fb, state, 0);
+
+ /* base address alignment */
+ if (baseaddr & 0x3) {
+ dc_plane_dbg(state->plane, "fb bad baddr alignment\n");
+ return -EINVAL;
+ }
+
+ /* pitches[0] range */
+ if (fb->pitches[0] > DC_PLANE_MAX_PITCH) {
+ dc_plane_dbg(state->plane, "fb pitches[0] is out of range\n");
+ return -EINVAL;
+ }
+
+ /* pitches[0] alignment */
+ if (fb->pitches[0] & 0x3) {
+ dc_plane_dbg(state->plane, "fb bad pitches[0] alignment\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+dc_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ /* ok to disable */
+ if (!plane_state->fb)
+ return 0;
+
+ if (!plane_state->crtc) {
+ dc_plane_dbg(plane, "no CRTC in plane state\n");
+ return -EINVAL;
+ }
+
+ crtc_state =
+ drm_atomic_get_existing_crtc_state(state, plane_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+
+ ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, false);
+ if (ret) {
+ dc_plane_dbg(plane, "failed to check plane state: %d\n", ret);
+ return ret;
+ }
+
+ ret = dc_plane_check_max_source_resolution(plane_state);
+ if (ret)
+ return ret;
+
+ return dc_plane_check_fb(plane_state);
+}
+
+static void
+dc_plane_atomic_update(struct drm_plane *plane, struct drm_atomic_state *state)
+{
+ struct drm_plane_state *new_state =
+ drm_atomic_get_new_plane_state(state, plane);
+ struct dc_plane *dplane = to_dc_plane(plane);
+ struct drm_framebuffer *fb = new_state->fb;
+ const struct dc_fu_ops *fu_ops;
+ struct dc_lb *lb = dplane->lb;
+ struct dc_fu *fu = dplane->fu;
+ dma_addr_t baseaddr;
+ int src_w, src_h;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
+
+ src_w = drm_rect_width(&new_state->src) >> 16;
+ src_h = drm_rect_height(&new_state->src) >> 16;
+
+ baseaddr = drm_fb_dma_get_gem_addr(fb, new_state, 0);
+
+ fu_ops = dc_fu_get_ops(dplane->fu);
+
+ fu_ops->set_layerblend(fu, lb);
+ fu_ops->set_burstlength(fu, baseaddr);
+ fu_ops->set_src_stride(fu, DC_FETCHUNIT_FRAC0, fb->pitches[0]);
+ fu_ops->set_src_buf_dimensions(fu, DC_FETCHUNIT_FRAC0, src_w, src_h);
+ fu_ops->set_fmt(fu, DC_FETCHUNIT_FRAC0, fb->format);
+ fu_ops->set_framedimensions(fu, src_w, src_h);
+ fu_ops->set_baseaddress(fu, DC_FETCHUNIT_FRAC0, baseaddr);
+ fu_ops->enable_src_buf(fu, DC_FETCHUNIT_FRAC0);
+
+ dc_plane_dbg(plane, "uses %s\n", fu_ops->get_name(fu));
+
+ dc_lb_pec_dynamic_prim_sel(lb, dc_cf_get_link_id(dplane->cf));
+ dc_lb_pec_dynamic_sec_sel(lb, fu_ops->get_link_id(fu));
+ dc_lb_mode(lb, LB_BLEND);
+ dc_lb_position(lb, new_state->dst.x1, new_state->dst.y1);
+ dc_lb_pec_clken(lb, CLKEN_AUTOMATIC);
+
+ dc_plane_dbg(plane, "uses LayerBlend%d\n", dc_lb_get_id(lb));
+
+ /* set ExtDst's source to LayerBlend */
+ dc_ed_pec_src_sel(dplane->ed, dc_lb_get_link_id(lb));
+
+ drm_dev_exit(idx);
+}
+
+static void dc_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct dc_plane *dplane = to_dc_plane(plane);
+ const struct dc_fu_ops *fu_ops;
+ int idx;
+
+ if (!drm_dev_enter(plane->dev, &idx))
+ return;
+
+ /* disable fetchunit in shadow */
+ fu_ops = dc_fu_get_ops(dplane->fu);
+ fu_ops->disable_src_buf(dplane->fu, DC_FETCHUNIT_FRAC0);
+
+ /* set ExtDst's source to ConstFrame */
+ dc_ed_pec_src_sel(dplane->ed, dc_cf_get_link_id(dplane->cf));
+
+ drm_dev_exit(idx);
+}
+
+static const struct drm_plane_helper_funcs dc_plane_helper_funcs = {
+ .atomic_check = dc_plane_atomic_check,
+ .atomic_update = dc_plane_atomic_update,
+ .atomic_disable = dc_plane_atomic_disable,
+};
+
+int dc_plane_init(struct dc_drm_device *dc_drm, struct dc_plane *dc_plane)
+{
+ struct drm_plane *plane = &dc_plane->base;
+ int ret;
+
+ ret = drm_universal_plane_init(&dc_drm->base, plane, 0, &dc_plane_funcs,
+ dc_plane_formats,
+ ARRAY_SIZE(dc_plane_formats),
+ NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+ if (ret)
+ return ret;
+
+ drm_plane_helper_add(plane, &dc_plane_helper_funcs);
+
+ dc_plane->fu = dc_drm->pe->fu_disp[plane->index];
+ dc_plane->cf = dc_drm->pe->cf_cont[plane->index];
+ dc_plane->lb = dc_drm->pe->lb[plane->index];
+ dc_plane->ed = dc_drm->pe->ed_cont[plane->index];
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/imx/dc/dc-tc.c b/drivers/gpu/drm/imx/dc/dc-tc.c
new file mode 100644
index 000000000000..0bfd381b2cea
--- /dev/null
+++ b/drivers/gpu/drm/imx/dc/dc-tc.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2024 NXP
+ */
+
+#include <linux/component.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "dc-drv.h"
+#include "dc-de.h"
+
+#define TCON_CTRL 0x410
+#define CTRL_RST_VAL 0x01401408
+
+/* red: MAPBIT 29-20, green: MAPBIT 19-10, blue: MAPBIT 9-0 */
+#define MAPBIT3_0 0x418
+#define MAPBIT7_4 0x41c
+#define MAPBIT11_8 0x420
+#define MAPBIT15_12 0x424
+#define MAPBIT19_16 0x428
+#define MAPBIT23_20 0x42c
+#define MAPBIT27_24 0x430
+#define MAPBIT31_28 0x434
+
+static const struct dc_subdev_info dc_tc_info[] = {
+ { .reg_start = 0x5618c800, .id = 0, },
+ { .reg_start = 0x5618e400, .id = 1, },
+};
+
+static const struct regmap_range dc_tc_regmap_ranges[] = {
+ regmap_reg_range(TCON_CTRL, TCON_CTRL),
+ regmap_reg_range(MAPBIT3_0, MAPBIT31_28),
+};
+
+static const struct regmap_access_table dc_tc_regmap_access_table = {
+ .yes_ranges = dc_tc_regmap_ranges,
+ .n_yes_ranges = ARRAY_SIZE(dc_tc_regmap_ranges),
+};
+
+static const struct regmap_config dc_tc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+ .wr_table = &dc_tc_regmap_access_table,
+ .rd_table = &dc_tc_regmap_access_table,
+ .max_register = MAPBIT31_28,
+};
+
+/*
+ * The pixels reaching the TCON are always in 30-bit BGR format.
+ * The first bridge always receives pixels in 30-bit RGB format.
+ * So, map the format to MEDIA_BUS_FMT_RGB101010_1X30.
+ */
+static const u32 dc_tc_mapbit[] = {
+ 0x17161514, 0x1b1a1918, 0x0b0a1d1c, 0x0f0e0d0c,
+ 0x13121110, 0x03020100, 0x07060504, 0x00000908,
+};
+
+void dc_tc_init(struct dc_tc *tc)
+{
+ /* reset TCON_CTRL to POR default so that TCON works in bypass mode */
+ regmap_write(tc->reg, TCON_CTRL, CTRL_RST_VAL);
+
+ /* set format */
+ regmap_bulk_write(tc->reg, MAPBIT3_0, dc_tc_mapbit,
+ ARRAY_SIZE(dc_tc_mapbit));
+}
+
+static int dc_tc_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dc_drm_device *dc_drm = data;
+ struct resource *res;
+ void __iomem *base;
+ struct dc_tc *tc;
+ int id;
+
+ tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
+ if (!tc)
+ return -ENOMEM;
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ tc->reg = devm_regmap_init_mmio(dev, base, &dc_tc_regmap_config);
+ if (IS_ERR(tc->reg))
+ return PTR_ERR(tc->reg);
+
+ id = dc_subdev_get_id(dc_tc_info, ARRAY_SIZE(dc_tc_info), res);
+ if (id < 0) {
+ dev_err(dev, "failed to get instance number: %d\n", id);
+ return id;
+ }
+
+ tc->dev = dev;
+ dc_drm->tc[id] = tc;
+
+ return 0;
+}
+
+static const struct component_ops dc_tc_ops = {
+ .bind = dc_tc_bind,
+};
+
+static int dc_tc_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = component_add(&pdev->dev, &dc_tc_ops);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "failed to add component\n");
+
+ return 0;
+}
+
+static void dc_tc_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dc_tc_ops);
+}
+
+static const struct of_device_id dc_tc_dt_ids[] = {
+ { .compatible = "fsl,imx8qxp-dc-tcon" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, dc_tc_dt_ids);
+
+struct platform_driver dc_tc_driver = {
+ .probe = dc_tc_probe,
+ .remove = dc_tc_remove,
+ .driver = {
+ .name = "imx8-dc-tcon",
+ .suppress_bind_attrs = true,
+ .of_match_table = dc_tc_dt_ids,
+ },
+};
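
The MAPBIT programming above is easier to follow with the table decoded. Below is a minimal standalone sketch, illustration only and not driver code: it assumes each byte of the MAPBIT registers selects the source bit for one output bit, with the low byte of MAPBIT3_0 driving output bit 0 (that per-byte interpretation is my reading of the register comment and values, not something stated in the patch).

#include <stdio.h>
#include <stdint.h>

/* Same table as dc_tc_mapbit[] in dc-tc.c above. */
static const uint32_t mapbit[] = {
	0x17161514, 0x1b1a1918, 0x0b0a1d1c, 0x0f0e0d0c,
	0x13121110, 0x03020100, 0x07060504, 0x00000908,
};

int main(void)
{
	/* Assumed semantics: byte N of the flattened table routes output bit N. */
	for (int out = 0; out < 30; out++) {
		int src = (mapbit[out / 4] >> ((out % 4) * 8)) & 0xff;

		printf("output bit %2d <- source bit %2d\n", out, src);
	}
	return 0;
}

Under that assumption the top and bottom 10-bit fields trade places while the middle field stays put, which is consistent with remapping the TCON's 30-bit BGR input to the RGB101010_1X30 layout the first bridge expects.
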
diff --git a/drivers/gpu/drm/lib/drm_random.c b/drivers/gpu/drm/lib/drm_random.c
index 31b5a3e21911..0e9dba1ef4af 100644
--- a/drivers/gpu/drm/lib/drm_random.c
+++ b/drivers/gpu/drm/lib/drm_random.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bitops.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 5deec673c11e..9722b847a539 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -341,7 +341,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
err = lima_sched_task_init(
submit->task, submit->ctx->context + submit->pipe,
- bos, submit->nr_bos, vm);
+ bos, submit->nr_bos, vm, file->client_id);
if (err)
goto err_out1;
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 7934098e651b..954f4325b859 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -113,7 +113,8 @@ static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sch
int lima_sched_task_init(struct lima_sched_task *task,
struct lima_sched_context *context,
struct lima_bo **bos, int num_bos,
- struct lima_vm *vm)
+ struct lima_vm *vm,
+ u64 drm_client_id)
{
int err, i;
@@ -124,7 +125,8 @@ int lima_sched_task_init(struct lima_sched_task *task,
for (i = 0; i < num_bos; i++)
drm_gem_object_get(&bos[i]->base.base);
- err = drm_sched_job_init(&task->base, &context->base, 1, vm);
+ err = drm_sched_job_init(&task->base, &context->base, 1, vm,
+ drm_client_id);
if (err) {
kfree(task->bos);
return err;
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 85b23ba901d5..1a08faf8a529 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -88,7 +88,8 @@ struct lima_sched_pipe {
int lima_sched_task_init(struct lima_sched_task *task,
struct lima_sched_context *context,
struct lima_bo **bos, int num_bos,
- struct lima_vm *vm);
+ struct lima_vm *vm,
+ u64 drm_client_id);
void lima_sched_task_fini(struct lima_sched_task *task);
int lima_sched_context_init(struct lima_sched_pipe *pipe,
diff --git a/drivers/gpu/drm/lima/lima_trace.h b/drivers/gpu/drm/lima/lima_trace.h
index 3a349d10304e..98a7fb2fa00e 100644
--- a/drivers/gpu/drm/lima/lima_trace.h
+++ b/drivers/gpu/drm/lima/lima_trace.h
@@ -14,21 +14,19 @@ DECLARE_EVENT_CLASS(lima_task,
TP_PROTO(struct lima_sched_task *task),
TP_ARGS(task),
TP_STRUCT__entry(
- __field(uint64_t, task_id)
__field(unsigned int, context)
__field(unsigned int, seqno)
__string(pipe, task->base.sched->name)
),
TP_fast_assign(
- __entry->task_id = task->base.id;
__entry->context = task->base.s_fence->finished.context;
__entry->seqno = task->base.s_fence->finished.seqno;
__assign_str(pipe);
),
- TP_printk("task=%llu, context=%u seqno=%u pipe=%s",
- __entry->task_id, __entry->context, __entry->seqno,
+ TP_printk("context=%u seqno=%u pipe=%s",
+ __entry->context, __entry->seqno,
__get_str(pipe))
);
diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c
index 58279ddaab3c..a5b10b2545dc 100644
--- a/drivers/gpu/drm/mediatek/mtk_dp.c
+++ b/drivers/gpu/drm/mediatek/mtk_dp.c
@@ -2725,9 +2725,10 @@ static int mtk_dp_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
- mtk_dp = devm_kzalloc(dev, sizeof(*mtk_dp), GFP_KERNEL);
- if (!mtk_dp)
- return -ENOMEM;
+ mtk_dp = devm_drm_bridge_alloc(dev, struct mtk_dp, bridge,
+ &mtk_dp_bridge_funcs);
+ if (IS_ERR(mtk_dp))
+ return PTR_ERR(mtk_dp);
mtk_dp->dev = dev;
mtk_dp->data = (struct mtk_dp_data *)of_device_get_match_data(dev);
@@ -2785,7 +2786,6 @@ static int mtk_dp_probe(struct platform_device *pdev)
if (ret)
return ret;
- mtk_dp->bridge.funcs = &mtk_dp_bridge_funcs;
mtk_dp->bridge.of_node = dev->of_node;
mtk_dp->bridge.type = mtk_dp->data->bridge_type;
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 6fb85bc6487a..8b58421bc39d 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -1266,9 +1266,10 @@ static int mtk_dpi_probe(struct platform_device *pdev)
struct mtk_dpi *dpi;
int ret;
- dpi = devm_kzalloc(dev, sizeof(*dpi), GFP_KERNEL);
- if (!dpi)
- return -ENOMEM;
+ dpi = devm_drm_bridge_alloc(dev, struct mtk_dpi, bridge,
+ &mtk_dpi_bridge_funcs);
+ if (IS_ERR(dpi))
+ return PTR_ERR(dpi);
dpi->dev = dev;
dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev);
@@ -1320,7 +1321,6 @@ static int mtk_dpi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dpi);
- dpi->bridge.funcs = &mtk_dpi_bridge_funcs;
dpi->bridge.of_node = dev->of_node;
dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 4fe1f38a3c4b..d7726091819c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -1196,9 +1196,10 @@ static int mtk_dsi_probe(struct platform_device *pdev)
int irq_num;
int ret;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct mtk_dsi, bridge,
+ &mtk_dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dsi->driver_data = of_device_get_match_data(dev);
@@ -1246,7 +1247,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dsi);
- dsi->bridge.funcs = &mtk_dsi_bridge_funcs;
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 8803cd4a8bc9..6943cdc77dec 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1642,9 +1642,10 @@ static int mtk_hdmi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int ret;
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
+ hdmi = devm_drm_bridge_alloc(dev, struct mtk_hdmi, bridge,
+ &mtk_hdmi_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
hdmi->dev = dev;
hdmi->conf = of_device_get_match_data(dev);
@@ -1666,7 +1667,6 @@ static int mtk_hdmi_probe(struct platform_device *pdev)
return dev_err_probe(dev, ret,
"Failed to register audio driver\n");
- hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
hdmi->bridge.of_node = pdev->dev.of_node;
hdmi->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
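
The mediatek and meson hunks in this series all follow the same conversion: devm_kzalloc() plus a later manual assignment of bridge.funcs is replaced by devm_drm_bridge_alloc(), which allocates the containing structure and wires up the funcs pointer in one step. A minimal sketch of the pattern for a made-up driver follows; the "foo" names are hypothetical, only the drm_bridge API used here is taken from the hunks above.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <drm/drm_bridge.h>

struct foo_bridge {
	struct drm_bridge bridge;	/* must be embedded, not a pointer */
	struct device *dev;
};

static const struct drm_bridge_funcs foo_bridge_funcs = {
	/* attach/enable/disable callbacks would go here */
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_bridge *foo;

	/* Allocates struct foo_bridge and sets foo->bridge.funcs in one go. */
	foo = devm_drm_bridge_alloc(&pdev->dev, struct foo_bridge, bridge,
				    &foo_bridge_funcs);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	foo->dev = &pdev->dev;
	foo->bridge.of_node = pdev->dev.of_node;
	drm_bridge_add(&foo->bridge);

	return 0;
}
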
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index c9678dc68fa1..dc374bfc5951 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -227,9 +227,12 @@ int meson_encoder_cvbs_probe(struct meson_drm *priv)
struct device_node *remote;
int ret;
- meson_encoder_cvbs = devm_kzalloc(priv->dev, sizeof(*meson_encoder_cvbs), GFP_KERNEL);
- if (!meson_encoder_cvbs)
- return -ENOMEM;
+ meson_encoder_cvbs = devm_drm_bridge_alloc(priv->dev,
+ struct meson_encoder_cvbs,
+ bridge,
+ &meson_encoder_cvbs_bridge_funcs);
+ if (IS_ERR(meson_encoder_cvbs))
+ return PTR_ERR(meson_encoder_cvbs);
/* CVBS Connector Bridge */
remote = of_graph_get_remote_node(priv->dev->of_node, 0, 0);
@@ -245,7 +248,6 @@ int meson_encoder_cvbs_probe(struct meson_drm *priv)
"Failed to find CVBS Connector bridge\n");
/* CVBS Encoder Bridge */
- meson_encoder_cvbs->bridge.funcs = &meson_encoder_cvbs_bridge_funcs;
meson_encoder_cvbs->bridge.of_node = priv->dev->of_node;
meson_encoder_cvbs->bridge.type = DRM_MODE_CONNECTOR_Composite;
meson_encoder_cvbs->bridge.ops = DRM_BRIDGE_OP_MODES;
diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c
index 3db518e5f95d..6c6624f9ba24 100644
--- a/drivers/gpu/drm/meson/meson_encoder_dsi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c
@@ -106,9 +106,12 @@ int meson_encoder_dsi_probe(struct meson_drm *priv)
struct device_node *remote;
int ret;
- meson_encoder_dsi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_dsi), GFP_KERNEL);
- if (!meson_encoder_dsi)
- return -ENOMEM;
+ meson_encoder_dsi = devm_drm_bridge_alloc(priv->dev,
+ struct meson_encoder_dsi,
+ bridge,
+ &meson_encoder_dsi_bridge_funcs);
+ if (IS_ERR(meson_encoder_dsi))
+ return PTR_ERR(meson_encoder_dsi);
/* DSI Transceiver Bridge */
remote = of_graph_get_remote_node(priv->dev->of_node, 2, 0);
@@ -123,7 +126,6 @@ int meson_encoder_dsi_probe(struct meson_drm *priv)
"Failed to find DSI transceiver bridge\n");
/* DSI Encoder Bridge */
- meson_encoder_dsi->bridge.funcs = &meson_encoder_dsi_bridge_funcs;
meson_encoder_dsi->bridge.of_node = priv->dev->of_node;
meson_encoder_dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index 47136bbbe8c6..8205ee56a691 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -109,7 +109,7 @@ static void meson_encoder_hdmi_set_vclk(struct meson_encoder_hdmi *encoder_hdmi,
venc_freq /= 2;
dev_dbg(priv->dev,
- "vclk:%lluHz phy=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n",
+ "phy:%lluHz vclk=%lluHz venc=%lluHz hdmi=%lluHz enci=%d\n",
phy_freq, vclk_freq, venc_freq, hdmi_freq,
priv->venc.hdmi_use_enci);
@@ -376,9 +376,12 @@ int meson_encoder_hdmi_probe(struct meson_drm *priv)
struct device_node *remote;
int ret;
- meson_encoder_hdmi = devm_kzalloc(priv->dev, sizeof(*meson_encoder_hdmi), GFP_KERNEL);
- if (!meson_encoder_hdmi)
- return -ENOMEM;
+ meson_encoder_hdmi = devm_drm_bridge_alloc(priv->dev,
+ struct meson_encoder_hdmi,
+ bridge,
+ &meson_encoder_hdmi_bridge_funcs);
+ if (IS_ERR(meson_encoder_hdmi))
+ return PTR_ERR(meson_encoder_hdmi);
/* HDMI Transceiver Bridge */
remote = of_graph_get_remote_node(priv->dev->of_node, 1, 0);
@@ -395,7 +398,6 @@ int meson_encoder_hdmi_probe(struct meson_drm *priv)
}
/* HDMI Encoder Bridge */
- meson_encoder_hdmi->bridge.funcs = &meson_encoder_hdmi_bridge_funcs;
meson_encoder_hdmi->bridge.of_node = priv->dev->of_node;
meson_encoder_hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
meson_encoder_hdmi->bridge.interlace_allowed = true;
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index 3325580d885d..dfe0c28a0f05 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -110,10 +110,7 @@
#define HDMI_PLL_LOCK BIT(31)
#define HDMI_PLL_LOCK_G12A (3 << 30)
-#define PIXEL_FREQ_1000_1001(_freq) \
- DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL)
-#define PHY_FREQ_1000_1001(_freq) \
- (PIXEL_FREQ_1000_1001(DIV_ROUND_DOWN_ULL(_freq, 10ULL)) * 10)
+#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST_ULL((_freq) * 1000ULL, 1001ULL)
/* VID PLL Dividers */
enum {
@@ -772,6 +769,36 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
pll_freq);
}
+static bool meson_vclk_freqs_are_matching_param(unsigned int idx,
+ unsigned long long phy_freq,
+ unsigned long long vclk_freq)
+{
+ DRM_DEBUG_DRIVER("i = %d vclk_freq = %lluHz alt = %lluHz\n",
+ idx, params[idx].vclk_freq,
+ FREQ_1000_1001(params[idx].vclk_freq));
+ DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n",
+ idx, params[idx].phy_freq,
+ FREQ_1000_1001(params[idx].phy_freq));
+
+ /* Match strict frequency */
+ if (phy_freq == params[idx].phy_freq &&
+ vclk_freq == params[idx].vclk_freq)
+ return true;
+
+ /* Match 1000/1001 variant: vclk deviation has to be less than 1kHz
+ * (drm EDID is defined in 1kHz steps, so everything smaller must be
+ * rounding error) and the PHY freq deviation has to be less than
+ * 10kHz (as the TMDS clock is 10 times the pixel clock, so anything
+ * smaller must be rounding error as well).
+ */
+ if (abs(vclk_freq - FREQ_1000_1001(params[idx].vclk_freq)) < 1000 &&
+ abs(phy_freq - FREQ_1000_1001(params[idx].phy_freq)) < 10000)
+ return true;
+
+ /* no match */
+ return false;
+}
+
enum drm_mode_status
meson_vclk_vic_supported_freq(struct meson_drm *priv,
unsigned long long phy_freq,
@@ -790,19 +817,7 @@ meson_vclk_vic_supported_freq(struct meson_drm *priv,
}
for (i = 0 ; params[i].pixel_freq ; ++i) {
- DRM_DEBUG_DRIVER("i = %d pixel_freq = %lluHz alt = %lluHz\n",
- i, params[i].pixel_freq,
- PIXEL_FREQ_1000_1001(params[i].pixel_freq));
- DRM_DEBUG_DRIVER("i = %d phy_freq = %lluHz alt = %lluHz\n",
- i, params[i].phy_freq,
- PHY_FREQ_1000_1001(params[i].phy_freq));
- /* Match strict frequency */
- if (phy_freq == params[i].phy_freq &&
- vclk_freq == params[i].vclk_freq)
- return MODE_OK;
- /* Match 1000/1001 variant */
- if (phy_freq == PHY_FREQ_1000_1001(params[i].phy_freq) &&
- vclk_freq == PIXEL_FREQ_1000_1001(params[i].vclk_freq))
+ if (meson_vclk_freqs_are_matching_param(i, phy_freq, vclk_freq))
return MODE_OK;
}
@@ -1075,10 +1090,8 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
}
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
- if ((phy_freq == params[freq].phy_freq ||
- phy_freq == PHY_FREQ_1000_1001(params[freq].phy_freq)) &&
- (vclk_freq == params[freq].vclk_freq ||
- vclk_freq == PIXEL_FREQ_1000_1001(params[freq].vclk_freq))) {
+ if (meson_vclk_freqs_are_matching_param(freq, phy_freq,
+ vclk_freq)) {
if (vclk_freq != params[freq].vclk_freq)
vic_alternate_clock = true;
else
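
The 1 kHz / 10 kHz tolerances in meson_vclk_freqs_are_matching_param() are easiest to see with concrete numbers. A small standalone sketch follows, illustration only; the 1080p 59.94 Hz figures are my own example and are not taken from the patch.

#include <stdio.h>
#include <stdint.h>

/* Same rounding as DIV_ROUND_CLOSEST_ULL((f) * 1000, 1001). */
#define FREQ_1000_1001(f) (((f) * 1000ULL + 500ULL) / 1001ULL)

int main(void)
{
	uint64_t vclk_param = 148500000ULL;	/* example table entry, Hz */
	uint64_t phy_param  = 1485000000ULL;	/* 10 * pixel clock */
	uint64_t vclk_req   = 148352000ULL;	/* 148352 kHz from a 1 kHz EDID mode */
	uint64_t phy_req    = vclk_req * 10;

	/* Expected: 352 Hz, well under the 1 kHz window. */
	printf("vclk delta = %llu Hz\n",
	       (unsigned long long)(vclk_req - FREQ_1000_1001(vclk_param)));
	/* Expected: 3516 Hz, well under the 10 kHz window. */
	printf("phy  delta = %llu Hz\n",
	       (unsigned long long)(phy_req - FREQ_1000_1001(phy_param)));
	return 0;
}

A genuinely different mode misses both windows, since a real clock difference is far larger than the rounding error left by 1 kHz EDID granularity.
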
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 819a7e9381e3..f4bf40cd7c88 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -382,10 +382,10 @@ int mgag200_primary_plane_helper_get_scanout_buffer(struct drm_plane *plane,
.destroy = drm_plane_cleanup, \
DRM_GEM_SHADOW_PLANE_FUNCS
-void mgag200_crtc_set_gamma_linear(struct mga_device *mdev, const struct drm_format_info *format);
-void mgag200_crtc_set_gamma(struct mga_device *mdev,
- const struct drm_format_info *format,
- struct drm_color_lut *lut);
+void mgag200_crtc_fill_gamma(struct mga_device *mdev, const struct drm_format_info *format);
+void mgag200_crtc_load_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut);
enum drm_mode_status mgag200_crtc_helper_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200er.c b/drivers/gpu/drm/mgag200/mgag200_g200er.c
index c20ed0ab50ec..23debc70dc54 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200er.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200er.c
@@ -200,9 +200,9 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200er_reset_tagfifo(mdev);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200ev.c b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
index 78be964eb97c..f8796e2b7a0f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200ev.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200ev.c
@@ -201,9 +201,9 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200ev_set_hiprilvl(mdev);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_g200se.c b/drivers/gpu/drm/mgag200/mgag200_g200se.c
index 7a32d3b1d226..e80da12ba1fe 100644
--- a/drivers/gpu/drm/mgag200/mgag200_g200se.c
+++ b/drivers/gpu/drm/mgag200/mgag200_g200se.c
@@ -332,9 +332,9 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, format);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 6067d08aeee3..951d715dea30 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -13,6 +13,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_format_helper.h>
@@ -30,35 +31,37 @@
* This file contains setup code for the CRTC.
*/
-void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
- const struct drm_format_info *format)
+static void mgag200_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
{
- int i;
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ u8 i8 = index & 0xff;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ WREG8(DAC_INDEX + MGA1064_INDEX, i8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, r8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, g8);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, b8);
+}
- WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+void mgag200_crtc_fill_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format)
+{
+ struct drm_crtc *crtc = &mdev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
- /* Use better interpolation, to take 32 values from 0 to 255 */
- for (i = 0; i < MGAG200_LUT_SIZE / 8; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 8 + i / 4);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 4 + i / 16);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 8 + i / 4);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = MGAG200_LUT_SIZE / 8; i < MGAG200_LUT_SIZE / 4; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i * 4 + i / 16);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- }
+ drm_crtc_fill_gamma_565(crtc, mgag200_set_gamma_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < MGAG200_LUT_SIZE; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, i);
- }
+ drm_crtc_fill_gamma_888(crtc, mgag200_set_gamma_lut);
break;
default:
drm_warn_once(&mdev->base, "Unsupported format %p4cc for gamma correction\n",
@@ -67,36 +70,19 @@ void mgag200_crtc_set_gamma_linear(struct mga_device *mdev,
}
}
-void mgag200_crtc_set_gamma(struct mga_device *mdev,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+void mgag200_crtc_load_gamma(struct mga_device *mdev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
- int i;
-
- WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+ struct drm_crtc *crtc = &mdev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
- /* Use better interpolation, to take 32 values from lut[0] to lut[255] */
- for (i = 0; i < MGAG200_LUT_SIZE / 8; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 8 + i / 4].red >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 4 + i / 16].green >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 8 + i / 4].blue >> 8);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = MGAG200_LUT_SIZE / 8; i < MGAG200_LUT_SIZE / 4; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i * 4 + i / 16].green >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, 0);
- }
+ drm_crtc_load_gamma_565_from_888(crtc, lut, mgag200_set_gamma_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
- for (i = 0; i < MGAG200_LUT_SIZE; i++) {
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].red >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].green >> 8);
- WREG8(DAC_INDEX + MGA1064_COL_PAL, lut[i].blue >> 8);
- }
+ drm_crtc_load_gamma_888(crtc, lut, mgag200_set_gamma_lut);
break;
default:
drm_warn_once(&mdev->base, "Unsupported format %p4cc for gamma correction\n",
@@ -642,9 +628,9 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
const struct drm_format_info *format = mgag200_crtc_state->format;
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
}
}
@@ -665,9 +651,9 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
funcs->pixpllc_atomic_update(crtc, old_state);
if (crtc_state->gamma_lut)
- mgag200_crtc_set_gamma(mdev, format, crtc_state->gamma_lut->data);
+ mgag200_crtc_load_gamma(mdev, format, crtc_state->gamma_lut->data);
else
- mgag200_crtc_set_gamma_linear(mdev, format);
+ mgag200_crtc_fill_gamma(mdev, format);
mgag200_enable_display(mdev);
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3e9aa2cc38ef..d9be0fe3d674 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -30,7 +30,7 @@
static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu,
struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
- uint32_t nr_cmds)
+ uint32_t nr_cmds, u64 drm_client_id)
{
static atomic_t ident = ATOMIC_INIT(0);
struct msm_gem_submit *submit;
@@ -54,7 +54,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return ERR_PTR(ret);
}
- ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue);
+ ret = drm_sched_job_init(&submit->base, queue->entity, 1, queue,
+ drm_client_id);
if (ret) {
kfree(submit->hw_fence);
kfree(submit);
@@ -693,7 +694,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
- submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds);
+ submit = submit_create(dev, gpu, queue, args->nr_bos, args->nr_cmds,
+ file->client_id);
if (IS_ERR(submit)) {
ret = PTR_ERR(submit);
goto out_post_unlock;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 6ded8c2b6d3b..d5654e26d5bc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -50,10 +50,10 @@ nouveau_fctx(struct nouveau_fence *fence)
return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}
-static int
+static bool
nouveau_fence_signal(struct nouveau_fence *fence)
{
- int drop = 0;
+ bool drop = false;
dma_fence_signal_locked(&fence->base);
list_del(&fence->head);
@@ -63,7 +63,7 @@ nouveau_fence_signal(struct nouveau_fence *fence)
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
if (!--fctx->notify_ref)
- drop = 1;
+ drop = true;
}
dma_fence_put(&fence->base);
@@ -83,13 +83,11 @@ nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm)
void
nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
{
- struct nouveau_fence *fence;
+ struct nouveau_fence *fence, *tmp;
unsigned long flags;
spin_lock_irqsave(&fctx->lock, flags);
- while (!list_empty(&fctx->pending)) {
- fence = list_entry(fctx->pending.next, typeof(*fence), head);
-
+ list_for_each_entry_safe(fence, tmp, &fctx->pending, head) {
if (error && !dma_fence_is_signaled_locked(&fence->base))
dma_fence_set_error(&fence->base, error);
@@ -127,23 +125,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}
-static int
+static void
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
- struct nouveau_fence *fence;
- int drop = 0;
+ struct nouveau_fence *fence, *tmp;
+ bool drop = false;
u32 seq = fctx->read(chan);
- while (!list_empty(&fctx->pending)) {
- fence = list_entry(fctx->pending.next, typeof(*fence), head);
-
+ list_for_each_entry_safe(fence, tmp, &fctx->pending, head) {
if ((int)(seq - fence->base.seqno) < 0)
break;
- drop |= nouveau_fence_signal(fence);
+ if (nouveau_fence_signal(fence))
+ drop = true;
}
- return drop;
+ if (drop)
+ nvif_event_block(&fctx->event);
}
static void
@@ -151,22 +149,16 @@ nouveau_fence_uevent_work(struct work_struct *work)
{
struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
uevent_work);
+ struct nouveau_channel *chan;
+ struct nouveau_fence *fence;
unsigned long flags;
- int drop = 0;
spin_lock_irqsave(&fctx->lock, flags);
- if (!list_empty(&fctx->pending)) {
- struct nouveau_fence *fence;
- struct nouveau_channel *chan;
-
- fence = list_entry(fctx->pending.next, typeof(*fence), head);
+ fence = list_first_entry_or_null(&fctx->pending, typeof(*fence), head);
+ if (fence) {
chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
- if (nouveau_fence_update(chan, fctx))
- drop = 1;
+ nouveau_fence_update(chan, fctx);
}
- if (drop)
- nvif_event_block(&fctx->event);
-
spin_unlock_irqrestore(&fctx->lock, flags);
}
@@ -246,9 +238,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
return -ENODEV;
}
- if (nouveau_fence_update(chan, fctx))
- nvif_event_block(&fctx->event);
-
+ nouveau_fence_update(chan, fctx);
list_add_tail(&fence->head, &fctx->pending);
spin_unlock_irq(&fctx->lock);
}
@@ -259,21 +249,19 @@ nouveau_fence_emit(struct nouveau_fence *fence)
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
- if (fence->base.ops == &nouveau_fence_ops_legacy ||
- fence->base.ops == &nouveau_fence_ops_uevent) {
- struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
- struct nouveau_channel *chan;
- unsigned long flags;
+ struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+ struct nouveau_channel *chan;
+ unsigned long flags;
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
- return true;
+ if (dma_fence_is_signaled(&fence->base))
+ return true;
+
+ spin_lock_irqsave(&fctx->lock, flags);
+ chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+ if (chan)
+ nouveau_fence_update(chan, fctx);
+ spin_unlock_irqrestore(&fctx->lock, flags);
- spin_lock_irqsave(&fctx->lock, flags);
- chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
- if (chan && nouveau_fence_update(chan, fctx))
- nvif_event_block(&fctx->event);
- spin_unlock_irqrestore(&fctx->lock, flags);
- }
return dma_fence_is_signaled(&fence->base);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c
index d326e55d2d24..460a5fb02412 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sched.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sched.c
@@ -87,7 +87,8 @@ nouveau_job_init(struct nouveau_job *job,
}
ret = drm_sched_job_init(&job->base, &sched->entity,
- args->credits, NULL);
+ args->credits, NULL,
+ job->file_priv->client_id);
if (ret)
goto err_free_chains;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
index 4e09985424b6..e5bbd8563007 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/chan.c
@@ -104,7 +104,7 @@ nvkm_chan_cctx_get(struct nvkm_chan *chan, struct nvkm_engn *engn, struct nvkm_c
if (cctx) {
refcount_inc(&cctx->refs);
*pcctx = cctx;
- mutex_unlock(&chan->cgrp->mutex);
+ mutex_unlock(&cgrp->mutex);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
index 7e9e2d3564da..6e63df816d85 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/disp.c
@@ -648,7 +648,7 @@ r535_conn_new(struct nvkm_disp *disp, u32 id)
ctrl = nvkm_gsp_rm_ctrl_get(&disp->rm.objcom,
NV0073_CTRL_CMD_SPECIFIC_GET_CONNECTOR_DATA, sizeof(*ctrl));
if (IS_ERR(ctrl))
- return (void *)ctrl;
+ return ERR_CAST(ctrl);
ctrl->subDeviceInstance = 0;
ctrl->displayId = BIT(id);
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 533f70e8a4a6..cf055815077c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -524,7 +524,7 @@ static void dispc_save_context(struct dispc_device *dispc)
DSSDBG("context saved\n");
}
-static void dispc_restore_context(struct dispc_device *dispc)
+static noinline_for_stack void dispc_restore_context(struct dispc_device *dispc)
{
int i, j;
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 6eff97a09160..9f86db774c39 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -562,7 +562,6 @@ static const struct drm_bridge_funcs dpi_bridge_funcs = {
static void dpi_bridge_init(struct dpi_data *dpi)
{
- dpi->bridge.funcs = &dpi_bridge_funcs;
dpi->bridge.of_node = dpi->pdev->dev.of_node;
dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
@@ -707,9 +706,9 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
u32 datalines;
int r;
- dpi = devm_kzalloc(&pdev->dev, sizeof(*dpi), GFP_KERNEL);
- if (!dpi)
- return -ENOMEM;
+ dpi = devm_drm_bridge_alloc(&pdev->dev, struct dpi_data, bridge, &dpi_bridge_funcs);
+ if (IS_ERR(dpi))
+ return PTR_ERR(dpi);
ep = of_graph_get_next_port_endpoint(port, NULL);
if (!ep)
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 91ee63bfe0bc..b129e5a8d791 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -4701,7 +4701,6 @@ static const struct drm_bridge_funcs dsi_bridge_funcs = {
static void dsi_bridge_init(struct dsi_data *dsi)
{
- dsi->bridge.funcs = &dsi_bridge_funcs;
dsi->bridge.of_node = dsi->host.dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
@@ -4894,9 +4893,9 @@ static int dsi_probe(struct platform_device *pdev)
unsigned int i;
int r;
- dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(dev, struct dsi_data, bridge, &dsi_bridge_funcs);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
dsi->dev = dev;
dev_set_drvdata(dev, dsi);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index a3b22952fdc3..3cd612af2449 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -505,7 +505,6 @@ static const struct drm_bridge_funcs hdmi4_bridge_funcs = {
static void hdmi4_bridge_init(struct omap_hdmi *hdmi)
{
- hdmi->bridge.funcs = &hdmi4_bridge_funcs;
hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
@@ -761,9 +760,9 @@ static int hdmi4_probe(struct platform_device *pdev)
int irq;
int r;
- hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
+ hdmi = devm_drm_bridge_alloc(&pdev->dev, struct omap_hdmi, bridge, &hdmi4_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
hdmi->pdev = pdev;
@@ -774,25 +773,24 @@ static int hdmi4_probe(struct platform_device *pdev)
r = hdmi4_probe_of(hdmi);
if (r)
- goto err_free;
+ return r;
r = hdmi_wp_init(pdev, &hdmi->wp, 4);
if (r)
- goto err_free;
+ return r;
r = hdmi_phy_init(pdev, &hdmi->phy, 4);
if (r)
- goto err_free;
+ return r;
r = hdmi4_core_init(pdev, &hdmi->core);
if (r)
- goto err_free;
+ return r;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
- r = -ENODEV;
- goto err_free;
+ return -ENODEV;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -800,7 +798,7 @@ static int hdmi4_probe(struct platform_device *pdev)
IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err_free;
+ return r;
}
hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda");
@@ -808,7 +806,7 @@ static int hdmi4_probe(struct platform_device *pdev)
r = PTR_ERR(hdmi->vdda_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDA regulator\n");
- goto err_free;
+ return r;
}
pm_runtime_enable(&pdev->dev);
@@ -827,8 +825,6 @@ err_uninit_output:
hdmi4_uninit_output(hdmi);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
-err_free:
- kfree(hdmi);
return r;
}
@@ -841,8 +837,6 @@ static void hdmi4_remove(struct platform_device *pdev)
hdmi4_uninit_output(hdmi);
pm_runtime_disable(&pdev->dev);
-
- kfree(hdmi);
}
static const struct of_device_id hdmi_of_match[] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 0c98444d39a9..5636b3dfec1c 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -480,7 +480,6 @@ static const struct drm_bridge_funcs hdmi5_bridge_funcs = {
static void hdmi5_bridge_init(struct omap_hdmi *hdmi)
{
- hdmi->bridge.funcs = &hdmi5_bridge_funcs;
hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
@@ -727,9 +726,9 @@ static int hdmi5_probe(struct platform_device *pdev)
int irq;
int r;
- hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
+ hdmi = devm_drm_bridge_alloc(&pdev->dev, struct omap_hdmi, bridge, &hdmi5_bridge_funcs);
+ if (IS_ERR(hdmi))
+ return PTR_ERR(hdmi);
hdmi->pdev = pdev;
@@ -740,25 +739,24 @@ static int hdmi5_probe(struct platform_device *pdev)
r = hdmi5_probe_of(hdmi);
if (r)
- goto err_free;
+ return r;
r = hdmi_wp_init(pdev, &hdmi->wp, 5);
if (r)
- goto err_free;
+ return r;
r = hdmi_phy_init(pdev, &hdmi->phy, 5);
if (r)
- goto err_free;
+ return r;
r = hdmi5_core_init(pdev, &hdmi->core);
if (r)
- goto err_free;
+ return r;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
- r = -ENODEV;
- goto err_free;
+ return -ENODEV;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -766,7 +764,7 @@ static int hdmi5_probe(struct platform_device *pdev)
IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err_free;
+ return r;
}
hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda");
@@ -774,7 +772,7 @@ static int hdmi5_probe(struct platform_device *pdev)
r = PTR_ERR(hdmi->vdda_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDA regulator\n");
- goto err_free;
+ return r;
}
pm_runtime_enable(&pdev->dev);
@@ -793,8 +791,6 @@ err_uninit_output:
hdmi5_uninit_output(hdmi);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
-err_free:
- kfree(hdmi);
return r;
}
@@ -807,8 +803,6 @@ static void hdmi5_remove(struct platform_device *pdev)
hdmi5_uninit_output(hdmi);
pm_runtime_disable(&pdev->dev);
-
- kfree(hdmi);
}
static const struct of_device_id hdmi_of_match[] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index e78826e4b560..df4cbc683e2c 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -284,7 +284,6 @@ static const struct drm_bridge_funcs sdi_bridge_funcs = {
static void sdi_bridge_init(struct sdi_device *sdi)
{
- sdi->bridge.funcs = &sdi_bridge_funcs;
sdi->bridge.of_node = sdi->pdev->dev.of_node;
sdi->bridge.type = DRM_MODE_CONNECTOR_LVDS;
@@ -344,21 +343,19 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
u32 datapairs;
int r;
- sdi = kzalloc(sizeof(*sdi), GFP_KERNEL);
- if (!sdi)
- return -ENOMEM;
+ sdi = devm_drm_bridge_alloc(&pdev->dev, struct sdi_device, bridge, &sdi_bridge_funcs);
+ if (IS_ERR(sdi))
+ return PTR_ERR(sdi);
ep = of_graph_get_next_port_endpoint(port, NULL);
- if (!ep) {
- r = 0;
- goto err_free;
- }
+ if (!ep)
+ return 0;
r = of_property_read_u32(ep, "datapairs", &datapairs);
of_node_put(ep);
if (r) {
DSSERR("failed to parse datapairs\n");
- goto err_free;
+ return r;
}
sdi->datapairs = datapairs;
@@ -372,19 +369,14 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
r = PTR_ERR(sdi->vdds_sdi_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDS_SDI regulator\n");
- goto err_free;
+ return r;
}
r = sdi_init_output(sdi);
if (r)
- goto err_free;
+ return r;
return 0;
-
-err_free:
- kfree(sdi);
-
- return r;
}
void sdi_uninit_port(struct device_node *port)
@@ -395,5 +387,4 @@ void sdi_uninit_port(struct device_node *port)
return;
sdi_uninit_output(sdi);
- kfree(sdi);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 50349518eda1..9b5d53dc361e 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -664,7 +664,6 @@ static const struct drm_bridge_funcs venc_bridge_funcs = {
static void venc_bridge_init(struct venc_device *venc)
{
- venc->bridge.funcs = &venc_bridge_funcs;
venc->bridge.of_node = venc->pdev->dev.of_node;
venc->bridge.ops = DRM_BRIDGE_OP_MODES;
venc->bridge.type = DRM_MODE_CONNECTOR_SVIDEO;
@@ -809,9 +808,9 @@ static int venc_probe(struct platform_device *pdev)
struct venc_device *venc;
int r;
- venc = kzalloc(sizeof(*venc), GFP_KERNEL);
- if (!venc)
- return -ENOMEM;
+ venc = devm_drm_bridge_alloc(&pdev->dev, struct venc_device, bridge, &venc_bridge_funcs);
+ if (IS_ERR(venc))
+ return PTR_ERR(venc);
venc->pdev = pdev;
@@ -824,26 +823,24 @@ static int venc_probe(struct platform_device *pdev)
venc->config = &venc_config_pal_trm;
venc->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(venc->base)) {
- r = PTR_ERR(venc->base);
- goto err_free;
- }
+ if (IS_ERR(venc->base))
+ return PTR_ERR(venc->base);
venc->vdda_dac_reg = devm_regulator_get(&pdev->dev, "vdda");
if (IS_ERR(venc->vdda_dac_reg)) {
r = PTR_ERR(venc->vdda_dac_reg);
if (r != -EPROBE_DEFER)
DSSERR("can't get VDDA_DAC regulator\n");
- goto err_free;
+ return r;
}
r = venc_get_clocks(venc);
if (r)
- goto err_free;
+ return r;
r = venc_probe_of(venc);
if (r)
- goto err_free;
+ return r;
pm_runtime_enable(&pdev->dev);
@@ -861,8 +858,6 @@ err_uninit_output:
venc_uninit_output(venc);
err_pm_disable:
pm_runtime_disable(&pdev->dev);
-err_free:
- kfree(venc);
return r;
}
@@ -875,8 +870,6 @@ static void venc_remove(struct platform_device *pdev)
venc_uninit_output(venc);
pm_runtime_disable(&pdev->dev);
-
- kfree(venc);
}
static __maybe_unused int venc_runtime_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index cfebb08e8a62..d5aa1c95c6a4 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -647,6 +647,32 @@ config DRM_PANEL_RAYDIUM_RM69380
This panel controller can be found in the Lenovo Xiaoxin Pad Pro 2021
in combination with an EDO OLED panel.
+config DRM_PANEL_RENESAS_R61307
+ tristate "Renesas R61307 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the KOE tx13d100vm0eaa
+ IPS-LCD module with the Renesas R61307 IC. The panel has a 1024x768
+ resolution and uses 24-bit RGB per pixel.
+
+ This panel controller can be found in the LG Optimus Vu P895
+ smartphone in combination with an LCD panel.
+
+config DRM_PANEL_RENESAS_R69328
+ tristate "Renesas R69328 720x1280 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the JDI dx12d100vm0eaa
+ IPS-LCD module with the Renesas R69328 IC. The panel has a 720x1280
+ resolution and uses 24-bit RGB per pixel.
+
+ This panel controller can be found in the LG Optimus 4X P880
+ smartphone in combination with an LCD panel.
+
config DRM_PANEL_RONBO_RB070D30
tristate "Ronbo Electronics RB070D30 panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 714cbac830e3..73a39bc72604 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -65,6 +65,8 @@ obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM67200) += panel-raydium-rm67200.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM692E5) += panel-raydium-rm692e5.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM69380) += panel-raydium-rm69380.o
+obj-$(CONFIG_DRM_PANEL_RENESAS_R61307) += panel-renesas-r61307.o
+obj-$(CONFIG_DRM_PANEL_RENESAS_R69328) += panel-renesas-r69328.o
obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS581VF01) += panel-samsung-ams581vf01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_AMS639RQ08) += panel-samsung-ams639rq08.o
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index 90e8c154a978..6c45c9e879ec 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -113,7 +113,7 @@ struct panel_delay {
* // do fixed enable delay
* // enforce prepare_to_enable min time
*
- * This is not specified in a standard way on eDP timing diagrams.
+ * This is usually (T4+T5+T6+T8)-min on eDP timing diagrams.
* It is effectively the time from HPD going high till you can
* turn on the backlight.
*/
@@ -1869,6 +1869,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('A', 'U', 'O', 0x235c, &delay_200_500_e50, "B116XTN02.3"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x239b, &delay_200_500_e50, "B116XAN06.1"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x255c, &delay_200_500_e50, "B116XTN02.5"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x30ed, &delay_200_500_e50, "G156HAN03.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x403d, &delay_200_500_e50, "B140HAN04.0"),
EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAN04.0"),
EDP_PANEL_ENTRY2('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01.0",
@@ -1923,6 +1924,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('B', 'O', 'E', 0x094b, &delay_200_500_e50, "NT116WHM-N21"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0951, &delay_200_500_e80, "NV116WHM-N47"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x095f, &delay_200_500_e50, "NE135FBM-N41 v8.1"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0964, &delay_200_500_e50, "NV133WUM-N61"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x096e, &delay_200_500_e50_po2e200, "NV116WHM-T07 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0979, &delay_200_500_e50, "NV116WHM-N49 V8.0"),
EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
@@ -1973,6 +1975,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d4, &delay_200_500_e80_d50, "N140HCA-EAC"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14d6, &delay_200_500_e80_d50, "N140BGA-EA4"),
EDP_PANEL_ENTRY('C', 'M', 'N', 0x14e5, &delay_200_500_e80_d50, "N140HGA-EA1"),
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x162b, &delay_200_500_e80_d50, "N160JCE-ELL"),
EDP_PANEL_ENTRY('C', 'S', 'O', 0x1200, &delay_200_500_e50_p2e200, "MNC207QS1-1"),
EDP_PANEL_ENTRY('C', 'S', 'O', 0x1413, &delay_200_500_e50_p2e200, "MNE007JA1-2"),
@@ -2005,6 +2008,7 @@ static const struct edp_panel_entry edp_panels[] = {
EDP_PANEL_ENTRY('K', 'D', 'C', 0x044f, &delay_200_500_e50, "KD116N9-30NH-F3"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x05f1, &delay_200_500_e80_d50, "KD116N5-30NV-G7"),
EDP_PANEL_ENTRY('K', 'D', 'C', 0x0809, &delay_200_500_e50, "KD116N2930A15"),
+ EDP_PANEL_ENTRY('K', 'D', 'C', 0x1220, &delay_200_500_e50, "KD116N3730A05"),
EDP_PANEL_ENTRY('L', 'G', 'D', 0x0000, &delay_200_500_e200_d200, "Unknown"),
EDP_PANEL_ENTRY('L', 'G', 'D', 0x048d, &delay_200_500_e200_d200, "Unknown"),
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index b904d5437444..1f177834d629 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -206,9 +206,10 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
struct kd35t133 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct kd35t133, panel,
+ &kd35t133_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
@@ -248,9 +249,6 @@ static int kd35t133_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
index 986e3e192881..6225501cb174 100644
--- a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -443,9 +443,11 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
unsigned int i;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct k101_im2ba02, panel,
+ &k101_im2ba02_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
@@ -463,9 +465,6 @@ static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset),
"Couldn't get our reset GPIO\n");
- drm_panel_init(&ctx->panel, &dsi->dev, &k101_im2ba02_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index 48e3acaecdf3..4f8d6d8c07e4 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -189,16 +189,14 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
struct feiyang *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct feiyang, panel,
+ &feiyang_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel, &dsi->dev, &feiyang_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
if (IS_ERR(ctx->dvdd))
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->dvdd),
diff --git a/drivers/gpu/drm/panel/panel-himax-hx83112a.c b/drivers/gpu/drm/panel/panel-himax-hx83112a.c
index 47bce087e339..142cb1cc067a 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx83112a.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx83112a.c
@@ -269,9 +269,11 @@ static int hx83112a_probe(struct mipi_dsi_device *dsi)
struct hx83112a_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct hx83112a_panel, panel,
+ &hx83112a_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vdd1";
ctx->supplies[1].supply = "vsn";
@@ -295,8 +297,6 @@ static int hx83112a_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_HSE |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &hx83112a_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-himax-hx8394.c b/drivers/gpu/drm/panel/panel-himax-hx8394.c
index ff994bf0e3cc..c4d3e09a228d 100644
--- a/drivers/gpu/drm/panel/panel-himax-hx8394.c
+++ b/drivers/gpu/drm/panel/panel-himax-hx8394.c
@@ -477,6 +477,147 @@ static const struct hx8394_panel_desc mchp_ac40t08a_desc = {
.init_sequence = mchp_ac40t08a_init_sequence,
};
+/*
+ * HL055FHAV028C is based on Himax HX8399, so datasheet pages are
+ * slightly different than HX8394 based panels.
+ */
+static void hl055fhav028c_init_sequence(struct mipi_dsi_multi_context *dsi_ctx)
+{
+ /* 6.3.6 SETEXTC: Set extension command (B9h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETEXTC,
+ 0xff, 0x83, 0x99);
+
+ /* 6.3.17 SETOFFSET: Set offset voltage (D2h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETOFFSET,
+ 0x77);
+
+ /* 6.3.1 SETPOWER: Set power (B1h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPOWER,
+ 0x02, 0x04, 0x74, 0x94, 0x01, 0x32,
+ 0x33, 0x11, 0x11, 0xab, 0x4d, 0x56,
+ 0x73, 0x02, 0x02);
+
+ /* 6.3.2 SETDISP: Set display related register (B2h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETDISP,
+ 0x00, 0x80, 0x80, 0xae, 0x05, 0x07,
+ 0x5a, 0x11, 0x00, 0x00, 0x10, 0x1e,
+ 0x70, 0x03, 0xd4);
+
+ /* 6.3.3 SETCYC: Set display waveform cycles (B4h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETCYC,
+ 0x00, 0xff, 0x02, 0xc0, 0x02, 0xc0,
+ 0x00, 0x00, 0x08, 0x00, 0x04, 0x06,
+ 0x00, 0x32, 0x04, 0x0a, 0x08, 0x21,
+ 0x03, 0x01, 0x00, 0x0f, 0xb8, 0x8b,
+ 0x02, 0xc0, 0x02, 0xc0, 0x00, 0x00,
+ 0x08, 0x00, 0x04, 0x06, 0x00, 0x32,
+ 0x04, 0x0a, 0x08, 0x01, 0x00, 0x0f,
+ 0xb8, 0x01);
+
+ /* 6.3.18 SETGIP0: Set GIP Option0 (D3h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP0,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x06, 0x00, 0x00, 0x10, 0x04, 0x00,
+ 0x04, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x05, 0x05, 0x07, 0x00, 0x00,
+ 0x00, 0x05, 0x40);
+
+ /* 6.3.19 Set GIP Option1 (D5h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP1,
+ 0x18, 0x18, 0x19, 0x19, 0x18, 0x18,
+ 0x21, 0x20, 0x01, 0x00, 0x07, 0x06,
+ 0x05, 0x04, 0x03, 0x02, 0x18, 0x18,
+ 0x18, 0x18, 0x18, 0x18, 0x2f, 0x2f,
+ 0x30, 0x30, 0x31, 0x31, 0x18, 0x18,
+ 0x18, 0x18);
+
+ /* 6.3.20 Set GIP Option2 (D6h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGIP2,
+ 0x18, 0x18, 0x19, 0x19, 0x40, 0x40,
+ 0x20, 0x21, 0x02, 0x03, 0x04, 0x05,
+ 0x06, 0x07, 0x00, 0x01, 0x40, 0x40,
+ 0x40, 0x40, 0x40, 0x40, 0x2f, 0x2f,
+ 0x30, 0x30, 0x31, 0x31, 0x40, 0x40,
+ 0x40, 0x40);
+
+ /* 6.3.21 Set GIP Option3 (D8h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xa2, 0xaa, 0x02, 0xa0, 0xa2, 0xa8,
+ 0x02, 0xa0, 0xb0, 0x00, 0x00, 0x00,
+ 0xb0, 0x00, 0x00, 0x00);
+
+ /* 6.3.9 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x01);
+
+ /* 6.3.21 Set GIP Option3 (D8h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xb0, 0x00, 0x00, 0x00, 0xb0, 0x00,
+ 0x00, 0x00, 0xe2, 0xaa, 0x03, 0xf0,
+ 0xe2, 0xaa, 0x03, 0xf0);
+
+ /* 6.3.9 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x02);
+
+ /* 6.3.21 Set GIP Option3 (D8h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN4,
+ 0xe2, 0xaa, 0x03, 0xf0, 0xe2, 0xaa,
+ 0x03, 0xf0);
+
+ /* 6.3.9 Set register bank (BDh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETREGBANK,
+ 0x00);
+
+ /* 6.3.4 SETVCOM: Set VCOM voltage (B6h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETVCOM,
+ 0x7a, 0x7a);
+
+ /* 6.3.26 SETGAMMA: Set gamma curve related setting (E0h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETGAMMA,
+ 0x00, 0x18, 0x27, 0x24, 0x5a, 0x68,
+ 0x79, 0x78, 0x81, 0x8a, 0x92, 0x99,
+ 0x9e, 0xa7, 0xaf, 0xb4, 0xb9, 0xc3,
+ 0xc7, 0xd1, 0xc6, 0xd4, 0xd5, 0x6c,
+ 0x67, 0x71, 0x77, 0x00, 0x00, 0x18,
+ 0x27, 0x24, 0x5a, 0x68, 0x79, 0x78,
+ 0x81, 0x8a, 0x92, 0x99, 0x9e, 0xa7,
+ 0xaf, 0xb4, 0xb9, 0xc3, 0xc7, 0xd1,
+ 0xc6, 0xd4, 0xd5, 0x6c, 0x67, 0x77);
+
+ /* Unknown command, not listed in the HX8399-C datasheet (C6h) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_UNKNOWN2,
+ 0xff, 0xf9);
+
+ /* 6.3.16 SETPANEL (CCh) */
+ mipi_dsi_dcs_write_seq_multi(dsi_ctx, HX8394_CMD_SETPANEL,
+ 0x08);
+}
+
+static const struct drm_display_mode hl055fhav028c_mode = {
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 32,
+ .hsync_end = 1080 + 32 + 8,
+ .htotal = 1080 + 32 + 8 + 32,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 16,
+ .vsync_end = 1920 + 16 + 2,
+ .vtotal = 1920 + 16 + 2 + 14,
+ .clock = 134920,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 70,
+ .height_mm = 127,
+};
+
+static const struct hx8394_panel_desc hl055fhav028c_desc = {
+ .mode = &hl055fhav028c_mode,
+ .lanes = 4,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_sequence = hl055fhav028c_init_sequence,
+};
+
static int hx8394_enable(struct drm_panel *panel)
{
struct hx8394 *ctx = panel_to_hx8394(panel);
@@ -611,9 +752,11 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
struct hx8394 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct hx8394, panel,
+ &hx8394_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
@@ -645,9 +788,6 @@ static int hx8394_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(ctx->iovcc),
"Failed to request iovcc regulator\n");
- drm_panel_init(&ctx->panel, dev, &hx8394_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
@@ -683,6 +823,7 @@ static void hx8394_remove(struct mipi_dsi_device *dsi)
static const struct of_device_id hx8394_of_match[] = {
{ .compatible = "hannstar,hsd060bhw4", .data = &hsd060bhw4_desc },
+ { .compatible = "huiling,hl055fhav028c", .data = &hl055fhav028c_desc },
{ .compatible = "powkiddy,x55-panel", .data = &powkiddy_x55_desc },
{ .compatible = "microchip,ac40t08a-mipi-panel", .data = &mchp_ac40t08a_desc },
{ /* sentinel */ }
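
As a quick plausibility check of the hl055fhav028c_mode timings added above: the pixel clock should be close to htotal × vtotal × refresh rate. Here htotal = 1080 + 32 + 8 + 32 = 1152 and vtotal = 1920 + 16 + 2 + 14 = 1952, so 1152 × 1952 × 60 Hz ≈ 134922 kHz, which matches the .clock = 134920 entry, i.e. the panel refreshes at essentially 60 Hz.
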
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 94b7dfef3b5e..6ed544a83bdd 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -722,9 +722,10 @@ static int ili9322_probe(struct spi_device *spi)
int ret;
int i;
- ili = devm_kzalloc(dev, sizeof(struct ili9322), GFP_KERNEL);
- if (!ili)
- return -ENOMEM;
+ ili = devm_drm_panel_alloc(dev, struct ili9322, panel,
+ &ili9322_drm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ili))
+ return PTR_ERR(ili);
spi_set_drvdata(spi, ili);
@@ -883,9 +884,6 @@ static int ili9322_probe(struct spi_device *spi)
ili->input = ili->conf->input;
}
- drm_panel_init(&ili->panel, dev, &ili9322_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&ili->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
index ff39f5dd4097..f7425dfaa50d 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c
@@ -173,7 +173,6 @@ struct ili9341_config {
};
struct ili9341 {
- struct device *dev;
const struct ili9341_config *conf;
struct drm_panel panel;
struct gpio_desc *reset_gpio;
@@ -490,9 +489,11 @@ static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc,
struct ili9341 *ili;
int ret;
- ili = devm_kzalloc(dev, sizeof(struct ili9341), GFP_KERNEL);
- if (!ili)
- return -ENOMEM;
+ ili = devm_drm_panel_alloc(dev, struct ili9341, panel,
+ &ili9341_dpi_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ili))
+ return PTR_ERR(ili);
ili->dbi = devm_kzalloc(dev, sizeof(struct mipi_dbi),
GFP_KERNEL);
@@ -526,8 +527,6 @@ static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc,
}
ili->max_spi_speed = ili->conf->max_spi_speed;
- drm_panel_init(&ili->panel, dev, &ili9341_dpi_funcs,
- DRM_MODE_CONNECTOR_DPI);
drm_panel_add(&ili->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9805.c b/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
index 1cbc25758bd2..e6c483851f1f 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9805.c
@@ -307,9 +307,12 @@ static int ili9805_dsi_probe(struct mipi_dsi_device *dsi)
struct ili9805 *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct ili9805, panel,
+ &ili9805_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
ctx->desc = of_device_get_match_data(&dsi->dev);
@@ -320,9 +323,6 @@ static int ili9805_dsi_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_NO_EOT_PACKET;
dsi->lanes = 2;
- drm_panel_init(&ctx->panel, &dsi->dev, &ili9805_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
if (IS_ERR(ctx->dvdd))
return PTR_ERR(ctx->dvdd);
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c b/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
index a3c79ad99d0b..18aa6222b0c5 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9806e.c
@@ -166,9 +166,10 @@ static int ili9806e_dsi_probe(struct mipi_dsi_device *dsi)
struct ili9806e_panel *ctx;
int i, ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ili9806e_panel, panel, &ili9806e_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->desc = device_get_match_data(dev);
@@ -192,9 +193,6 @@ static int ili9806e_dsi_probe(struct mipi_dsi_device *dsi)
dsi->format = ctx->desc->format;
dsi->lanes = ctx->desc->lanes;
- drm_panel_init(&ctx->panel, dev, &ili9806e_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation);
if (ret)
return dev_err_probe(dev, ret, "Failed to get orientation\n");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 28cd7560e5db..ac433345a179 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -43,6 +43,7 @@ struct ili9881c_desc {
const struct drm_display_mode *mode;
const unsigned long mode_flags;
u8 default_address_mode;
+ unsigned int lanes;
};
struct ili9881c {
@@ -1223,6 +1224,199 @@ static const struct ili9881c_instr am8001280g_init[] = {
ILI9881C_COMMAND_INSTR(MIPI_DCS_WRITE_POWER_SAVE, 0x00),
};
+static const struct ili9881c_instr rpi_7inch_init[] = {
+ ILI9881C_SWITCH_PAGE_INSTR(3),
+ ILI9881C_COMMAND_INSTR(0x01, 0x00),
+ ILI9881C_COMMAND_INSTR(0x02, 0x00),
+ ILI9881C_COMMAND_INSTR(0x03, 0x73),
+ ILI9881C_COMMAND_INSTR(0x04, 0x00),
+ ILI9881C_COMMAND_INSTR(0x05, 0x00),
+ ILI9881C_COMMAND_INSTR(0x06, 0x0a),
+ ILI9881C_COMMAND_INSTR(0x07, 0x00),
+ ILI9881C_COMMAND_INSTR(0x08, 0x00),
+ ILI9881C_COMMAND_INSTR(0x09, 0x61),
+ ILI9881C_COMMAND_INSTR(0x0a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0c, 0x01),
+ ILI9881C_COMMAND_INSTR(0x0d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x0f, 0x61),
+ ILI9881C_COMMAND_INSTR(0x10, 0x61),
+ ILI9881C_COMMAND_INSTR(0x11, 0x00),
+ ILI9881C_COMMAND_INSTR(0x12, 0x00),
+ ILI9881C_COMMAND_INSTR(0x13, 0x00),
+ ILI9881C_COMMAND_INSTR(0x14, 0x00),
+ ILI9881C_COMMAND_INSTR(0x15, 0x00),
+ ILI9881C_COMMAND_INSTR(0x16, 0x00),
+ ILI9881C_COMMAND_INSTR(0x17, 0x00),
+ ILI9881C_COMMAND_INSTR(0x18, 0x00),
+ ILI9881C_COMMAND_INSTR(0x19, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x1e, 0x40),
+ ILI9881C_COMMAND_INSTR(0x1f, 0x80),
+ ILI9881C_COMMAND_INSTR(0x20, 0x06),
+ ILI9881C_COMMAND_INSTR(0x21, 0x01),
+ ILI9881C_COMMAND_INSTR(0x22, 0x00),
+ ILI9881C_COMMAND_INSTR(0x23, 0x00),
+ ILI9881C_COMMAND_INSTR(0x24, 0x00),
+ ILI9881C_COMMAND_INSTR(0x25, 0x00),
+ ILI9881C_COMMAND_INSTR(0x26, 0x00),
+ ILI9881C_COMMAND_INSTR(0x27, 0x00),
+ ILI9881C_COMMAND_INSTR(0x28, 0x33),
+ ILI9881C_COMMAND_INSTR(0x29, 0x03),
+ ILI9881C_COMMAND_INSTR(0x2a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x2f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x30, 0x00),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x32, 0x00),
+ ILI9881C_COMMAND_INSTR(0x33, 0x00),
+ ILI9881C_COMMAND_INSTR(0x34, 0x04),
+ ILI9881C_COMMAND_INSTR(0x35, 0x00),
+ ILI9881C_COMMAND_INSTR(0x36, 0x00),
+ ILI9881C_COMMAND_INSTR(0x37, 0x00),
+ ILI9881C_COMMAND_INSTR(0x38, 0x3c),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3b, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3c, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3d, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x3f, 0x00),
+ ILI9881C_COMMAND_INSTR(0x40, 0x00),
+ ILI9881C_COMMAND_INSTR(0x41, 0x00),
+ ILI9881C_COMMAND_INSTR(0x42, 0x00),
+ ILI9881C_COMMAND_INSTR(0x43, 0x00),
+ ILI9881C_COMMAND_INSTR(0x44, 0x00),
+ ILI9881C_COMMAND_INSTR(0x50, 0x10),
+ ILI9881C_COMMAND_INSTR(0x51, 0x32),
+ ILI9881C_COMMAND_INSTR(0x52, 0x54),
+ ILI9881C_COMMAND_INSTR(0x53, 0x76),
+ ILI9881C_COMMAND_INSTR(0x54, 0x98),
+ ILI9881C_COMMAND_INSTR(0x55, 0xba),
+ ILI9881C_COMMAND_INSTR(0x56, 0x10),
+ ILI9881C_COMMAND_INSTR(0x57, 0x32),
+ ILI9881C_COMMAND_INSTR(0x58, 0x54),
+ ILI9881C_COMMAND_INSTR(0x59, 0x76),
+ ILI9881C_COMMAND_INSTR(0x5a, 0x98),
+ ILI9881C_COMMAND_INSTR(0x5b, 0xba),
+ ILI9881C_COMMAND_INSTR(0x5c, 0xdc),
+ ILI9881C_COMMAND_INSTR(0x5d, 0xfe),
+ ILI9881C_COMMAND_INSTR(0x5e, 0x00),
+ ILI9881C_COMMAND_INSTR(0x5f, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x60, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x61, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x62, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x63, 0x06),
+ ILI9881C_COMMAND_INSTR(0x64, 0x07),
+ ILI9881C_COMMAND_INSTR(0x65, 0x02),
+ ILI9881C_COMMAND_INSTR(0x66, 0x02),
+ ILI9881C_COMMAND_INSTR(0x67, 0x02),
+ ILI9881C_COMMAND_INSTR(0x68, 0x02),
+ ILI9881C_COMMAND_INSTR(0x69, 0x01),
+ ILI9881C_COMMAND_INSTR(0x6a, 0x00),
+ ILI9881C_COMMAND_INSTR(0x6b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6c, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6d, 0x14),
+ ILI9881C_COMMAND_INSTR(0x6e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x6f, 0x02),
+ ILI9881C_COMMAND_INSTR(0x70, 0x02),
+ ILI9881C_COMMAND_INSTR(0x71, 0x02),
+ ILI9881C_COMMAND_INSTR(0x72, 0x02),
+ ILI9881C_COMMAND_INSTR(0x73, 0x02),
+ ILI9881C_COMMAND_INSTR(0x74, 0x02),
+ ILI9881C_COMMAND_INSTR(0x75, 0x0e),
+ ILI9881C_COMMAND_INSTR(0x76, 0x0f),
+ ILI9881C_COMMAND_INSTR(0x77, 0x0c),
+ ILI9881C_COMMAND_INSTR(0x78, 0x0d),
+ ILI9881C_COMMAND_INSTR(0x79, 0x06),
+ ILI9881C_COMMAND_INSTR(0x7a, 0x07),
+ ILI9881C_COMMAND_INSTR(0x7b, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7c, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7d, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7e, 0x02),
+ ILI9881C_COMMAND_INSTR(0x7f, 0x01),
+ ILI9881C_COMMAND_INSTR(0x80, 0x00),
+ ILI9881C_COMMAND_INSTR(0x81, 0x02),
+ ILI9881C_COMMAND_INSTR(0x82, 0x14),
+ ILI9881C_COMMAND_INSTR(0x83, 0x15),
+ ILI9881C_COMMAND_INSTR(0x84, 0x02),
+ ILI9881C_COMMAND_INSTR(0x85, 0x02),
+ ILI9881C_COMMAND_INSTR(0x86, 0x02),
+ ILI9881C_COMMAND_INSTR(0x87, 0x02),
+ ILI9881C_COMMAND_INSTR(0x88, 0x02),
+ ILI9881C_COMMAND_INSTR(0x89, 0x02),
+ ILI9881C_COMMAND_INSTR(0x8A, 0x02),
+ ILI9881C_SWITCH_PAGE_INSTR(4),
+ ILI9881C_COMMAND_INSTR(0x6C, 0x15),
+ ILI9881C_COMMAND_INSTR(0x6E, 0x2A),
+ ILI9881C_COMMAND_INSTR(0x6F, 0x33),
+ ILI9881C_COMMAND_INSTR(0x3B, 0x98),
+ ILI9881C_COMMAND_INSTR(0x3a, 0x94),
+ ILI9881C_COMMAND_INSTR(0x8D, 0x14),
+ ILI9881C_COMMAND_INSTR(0x87, 0xBA),
+ ILI9881C_COMMAND_INSTR(0x26, 0x76),
+ ILI9881C_COMMAND_INSTR(0xB2, 0xD1),
+ ILI9881C_COMMAND_INSTR(0xB5, 0x06),
+ ILI9881C_COMMAND_INSTR(0x38, 0x01),
+ ILI9881C_COMMAND_INSTR(0x39, 0x00),
+ ILI9881C_SWITCH_PAGE_INSTR(1),
+ ILI9881C_COMMAND_INSTR(0x22, 0x0A),
+ ILI9881C_COMMAND_INSTR(0x31, 0x00),
+ ILI9881C_COMMAND_INSTR(0x53, 0x7d),
+ ILI9881C_COMMAND_INSTR(0x55, 0x8f),
+ ILI9881C_COMMAND_INSTR(0x40, 0x33),
+ ILI9881C_COMMAND_INSTR(0x50, 0x96),
+ ILI9881C_COMMAND_INSTR(0x51, 0x96),
+ ILI9881C_COMMAND_INSTR(0x60, 0x23),
+ ILI9881C_COMMAND_INSTR(0xA0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xA1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xA2, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xA3, 0x10),
+ ILI9881C_COMMAND_INSTR(0xA4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xA5, 0x28),
+ ILI9881C_COMMAND_INSTR(0xA6, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xA7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xA8, 0x7e),
+ ILI9881C_COMMAND_INSTR(0xA9, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xAA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xAB, 0x6b),
+ ILI9881C_COMMAND_INSTR(0xAC, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xAD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xAE, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xAF, 0x20),
+ ILI9881C_COMMAND_INSTR(0xB0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xB1, 0x50),
+ ILI9881C_COMMAND_INSTR(0xB2, 0x64),
+ ILI9881C_COMMAND_INSTR(0xB3, 0x39),
+ ILI9881C_COMMAND_INSTR(0xC0, 0x08),
+ ILI9881C_COMMAND_INSTR(0xC1, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xC2, 0x2a),
+ ILI9881C_COMMAND_INSTR(0xC3, 0x10),
+ ILI9881C_COMMAND_INSTR(0xC4, 0x15),
+ ILI9881C_COMMAND_INSTR(0xC5, 0x28),
+ ILI9881C_COMMAND_INSTR(0xC6, 0x1c),
+ ILI9881C_COMMAND_INSTR(0xC7, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xC8, 0x7e),
+ ILI9881C_COMMAND_INSTR(0xC9, 0x1d),
+ ILI9881C_COMMAND_INSTR(0xCA, 0x29),
+ ILI9881C_COMMAND_INSTR(0xCB, 0x6b),
+ ILI9881C_COMMAND_INSTR(0xCC, 0x1a),
+ ILI9881C_COMMAND_INSTR(0xCD, 0x18),
+ ILI9881C_COMMAND_INSTR(0xCE, 0x4b),
+ ILI9881C_COMMAND_INSTR(0xCF, 0x20),
+ ILI9881C_COMMAND_INSTR(0xD0, 0x27),
+ ILI9881C_COMMAND_INSTR(0xD1, 0x50),
+ ILI9881C_COMMAND_INSTR(0xD2, 0x64),
+ ILI9881C_COMMAND_INSTR(0xD3, 0x39),
+};
+
static inline struct ili9881c *panel_to_ili9881c(struct drm_panel *panel)
{
return container_of(panel, struct ili9881c, panel);
@@ -1449,6 +1643,23 @@ static const struct drm_display_mode am8001280g_default_mode = {
.height_mm = 151,
};
+static const struct drm_display_mode rpi_7inch_default_mode = {
+ .clock = 83330,
+
+ .hdisplay = 720,
+ .hsync_start = 720 + 239,
+ .hsync_end = 720 + 239 + 33,
+ .htotal = 720 + 239 + 33 + 50,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 20,
+ .vsync_end = 1280 + 20 + 2,
+ .vtotal = 1280 + 20 + 2 + 30,
+
+ .width_mm = 90,
+ .height_mm = 151,
+};
+
static int ili9881c_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -1506,16 +1717,15 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
struct ili9881c *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct ili9881c, panel, &ili9881c_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
ctx->desc = of_device_get_match_data(&dsi->dev);
- drm_panel_init(&ctx->panel, &dsi->dev, &ili9881c_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->power = devm_regulator_get(&dsi->dev, "power");
if (IS_ERR(ctx->power))
return dev_err_probe(&dsi->dev, PTR_ERR(ctx->power),
@@ -1549,7 +1759,7 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = ctx->desc->mode_flags;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->lanes = 4;
+ dsi->lanes = ctx->desc->lanes;
return mipi_dsi_attach(dsi);
}
@@ -1567,6 +1777,7 @@ static const struct ili9881c_desc lhr050h41_desc = {
.init_length = ARRAY_SIZE(lhr050h41_init),
.mode = &lhr050h41_default_mode,
.mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .lanes = 4,
};
static const struct ili9881c_desc k101_im2byl02_desc = {
@@ -1574,6 +1785,7 @@ static const struct ili9881c_desc k101_im2byl02_desc = {
.init_length = ARRAY_SIZE(k101_im2byl02_init),
.mode = &k101_im2byl02_default_mode,
.mode_flags = MIPI_DSI_MODE_VIDEO_SYNC_PULSE,
+ .lanes = 4,
};
static const struct ili9881c_desc kd050hdfia020_desc = {
@@ -1599,6 +1811,7 @@ static const struct ili9881c_desc w552946aba_desc = {
.mode = &w552946aba_default_mode,
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET,
+ .lanes = 4,
};
static const struct ili9881c_desc am8001280g_desc = {
@@ -1609,6 +1822,14 @@ static const struct ili9881c_desc am8001280g_desc = {
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
};
+static const struct ili9881c_desc rpi_7inch_desc = {
+ .init = rpi_7inch_init,
+ .init_length = ARRAY_SIZE(rpi_7inch_init),
+ .mode = &rpi_7inch_default_mode,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM,
+ .lanes = 2,
+};
+
static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "bananapi,lhr050h41", .data = &lhr050h41_desc },
{ .compatible = "feixin,k101-im2byl02", .data = &k101_im2byl02_desc },
@@ -1616,6 +1837,7 @@ static const struct of_device_id ili9881c_of_match[] = {
{ .compatible = "tdo,tl050hdv35", .data = &tl050hdv35_desc },
{ .compatible = "wanchanglong,w552946aba", .data = &w552946aba_desc },
{ .compatible = "ampire,am8001280g", .data = &am8001280g_desc },
+ { .compatible = "raspberrypi,dsi-7inch", &rpi_7inch_desc },
{ }
};
MODULE_DEVICE_TABLE(of, ili9881c_of_match);
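
The ili9881c changes above move the DSI lane count into struct ili9881c_desc so the 2-lane Raspberry Pi 7-inch panel can share the driver with the existing 4-lane panels; probe now sets dsi->lanes = ctx->desc->lanes instead of hard-coding 4. A sketch of how a further panel descriptor would plug into this, with hypothetical names (example_init, example_default_mode, the compatible string) used purely for illustration:

static const struct ili9881c_desc example_desc = {
	.init = example_init,			/* panel-specific command table */
	.init_length = ARRAY_SIZE(example_init),
	.mode = &example_default_mode,
	.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM,
	.lanes = 2,				/* consumed as dsi->lanes = ctx->desc->lanes */
};

/* Matched via of_device_get_match_data() in ili9881c_dsi_probe(): */
static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-panel", .data = &example_desc },
	{ }
};
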
diff --git a/drivers/gpu/drm/panel/panel-innolux-ej030na.c b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
index f85b7a4cbb42..b2309900873b 100644
--- a/drivers/gpu/drm/panel/panel-innolux-ej030na.c
+++ b/drivers/gpu/drm/panel/panel-innolux-ej030na.c
@@ -204,9 +204,11 @@ static int ej030na_probe(struct spi_device *spi)
struct ej030na *priv;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct ej030na, panel,
+ &ej030na_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->spi = spi;
spi_set_drvdata(spi, priv);
@@ -231,9 +233,6 @@ static int ej030na_probe(struct spi_device *spi)
return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
"Failed to get reset GPIO\n");
- drm_panel_init(&priv->panel, dev, &ej030na_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index d95c0d4f3e35..80afeeab9475 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -382,9 +382,11 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
struct device *dev = &dsi->dev;
int err, i;
- innolux = devm_kzalloc(dev, sizeof(*innolux), GFP_KERNEL);
- if (!innolux)
- return -ENOMEM;
+ innolux = devm_drm_panel_alloc(dev, struct innolux_panel, base,
+ &innolux_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(innolux))
+ return PTR_ERR(innolux);
innolux->desc = desc;
@@ -410,9 +412,6 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi,
innolux->enable_gpio = NULL;
}
- drm_panel_init(&innolux->base, dev, &innolux_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
err = drm_panel_of_backlight(&innolux->base);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index eb0f8373258c..5c2530598ddb 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -1120,9 +1120,10 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
struct jadard *jadard;
int ret;
- jadard = devm_kzalloc(&dsi->dev, sizeof(*jadard), GFP_KERNEL);
- if (!jadard)
- return -ENOMEM;
+ jadard = devm_drm_panel_alloc(dev, struct jadard, panel, &jadard_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(jadard))
+ return PTR_ERR(jadard);
desc = of_device_get_match_data(dev);
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
@@ -1148,9 +1149,6 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(jadard->vccio);
}
- drm_panel_init(&jadard->panel, dev, &jadard_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = of_drm_get_panel_orientation(dev->of_node, &jadard->orientation);
if (ret < 0)
return dev_err_probe(dev, ret, "failed to get orientation\n");
diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
index 4eb71e85e9e9..cbe354b51bce 100644
--- a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
+++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
@@ -175,9 +175,11 @@ static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi)
struct jdi_fhd_r63452 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct jdi_fhd_r63452, panel,
+ &jdi_fhd_r63452_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
@@ -192,8 +194,6 @@ static int jdi_fhd_r63452_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &jdi_fhd_r63452_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
index 77f74e6c467e..0856df5a6ee2 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -548,9 +548,11 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
struct ltk050h3146w *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ltk050h3146w, panel,
+ &ltk050h3146w_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->panel_desc = of_device_get_match_data(dev);
if (!ctx->panel_desc)
@@ -577,9 +579,6 @@ static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = ctx->panel_desc->mode_flags;
- drm_panel_init(&ctx->panel, &dsi->dev, &ltk050h3146w_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
index 6b18cf00fd4a..7f19fd5b8060 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
@@ -604,9 +604,11 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
struct device *dev = &dsi->dev;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ltk500hd1829, panel,
+ &ltk500hd1829_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->panel_desc = of_device_get_match_data(dev);
if (!ctx->panel_desc)
@@ -643,9 +645,6 @@ static int ltk500hd1829_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&ctx->panel, &dsi->dev, &ltk500hd1829_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index 9d0d4faa3f58..b2be6727bf73 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -178,9 +178,10 @@ static int lb035q02_probe(struct spi_device *spi)
struct lb035q02_device *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct lb035q02_device, panel,
+ &lb035q02_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -195,9 +196,6 @@ static int lb035q02_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &lb035q02_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index cf246d15b7b6..dec619902c15 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -243,9 +243,11 @@ static int lg4573_probe(struct spi_device *spi)
struct lg4573 *ctx;
int ret;
- ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&spi->dev, struct lg4573, panel,
+ &lg4573_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->spi = spi;
@@ -258,9 +260,6 @@ static int lg4573_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel, &spi->dev, &lg4573_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&ctx->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c b/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c
index 032c542aab0f..24b34443ace0 100644
--- a/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c
+++ b/drivers/gpu/drm/panel/panel-lincolntech-lcd197.c
@@ -190,9 +190,11 @@ static int lincoln_lcd197_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = (MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_BURST);
- lcd = devm_kzalloc(&dsi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(dev, struct lincoln_lcd197_panel, panel,
+ &lincoln_lcd197_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
mipi_dsi_set_drvdata(dsi, lcd);
lcd->dsi = dsi;
@@ -214,9 +216,6 @@ static int lincoln_lcd197_panel_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(lcd->reset_gpio),
"failed to get reset gpio");
- drm_panel_init(&lcd->panel, dev,
- &lincoln_lcd197_panel_funcs, DRM_MODE_CONNECTOR_DSI);
-
err = drm_panel_of_backlight(&lcd->panel);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
index ba6c015aabba..23fd535d8f47 100644
--- a/drivers/gpu/drm/panel/panel-lvds.c
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -164,9 +164,11 @@ static int panel_lvds_probe(struct platform_device *pdev)
struct panel_lvds *lvds;
int ret;
- lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (!lvds)
- return -ENOMEM;
+ lvds = devm_drm_panel_alloc(&pdev->dev, struct panel_lvds, panel,
+ &panel_lvds_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
lvds->dev = &pdev->dev;
@@ -214,10 +216,6 @@ static int panel_lvds_probe(struct platform_device *pdev)
* driver.
*/
- /* Register the panel. */
- drm_panel_init(&lvds->panel, lvds->dev, &panel_lvds_funcs,
- DRM_MODE_CONNECTOR_LVDS);
-
ret = drm_panel_of_backlight(&lvds->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
index 799c2161fc85..cde168ec631c 100644
--- a/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
+++ b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
@@ -370,9 +370,11 @@ static int d53e6ea8966_probe(struct spi_device *spi)
.node = NULL,
};
- db = devm_kzalloc(dev, sizeof(*db), GFP_KERNEL);
- if (!db)
- return -ENOMEM;
+ db = devm_drm_panel_alloc(dev, struct d53e6ea8966, panel,
+ &d53e6ea8966_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(db))
+ return PTR_ERR(db);
spi_set_drvdata(spi, db);
@@ -425,9 +427,6 @@ static int d53e6ea8966_probe(struct spi_device *spi)
db->dsi_dev->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&db->panel, dev, &d53e6ea8966_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
if (db->panel_info->backlight_register) {
ret = db->panel_info->backlight_register(db);
if (ret < 0)
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 4db852ffb0f6..55664f5d5aa5 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -234,9 +234,11 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
struct mantix *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct mantix, panel, &mantix_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
ctx->default_mode = of_device_get_match_data(dev);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
@@ -271,9 +273,6 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(ctx->vddi))
return dev_err_probe(dev, PTR_ERR(ctx->vddi), "Failed to request vddi regulator\n");
- drm_panel_init(&ctx->panel, dev, &mantix_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index 81c5c541a351..d5c7210de4af 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -178,9 +178,10 @@ static int nl8048_probe(struct spi_device *spi)
struct nl8048_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct nl8048_panel, panel,
+ &nl8048_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -204,9 +205,6 @@ static int nl8048_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &nl8048_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
index b6429795e8f5..22560384e48e 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3051d.c
@@ -361,9 +361,11 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
struct panel_nv3051d *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct panel_nv3051d, panel,
+ &panel_nv3051d_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
@@ -391,9 +393,6 @@ static int panel_nv3051d_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = ctx->panel_info->mode_flags;
- drm_panel_init(&ctx->panel, &dsi->dev, &panel_nv3051d_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
index 06e16a7c14a7..0db9cadd868e 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
@@ -777,9 +777,10 @@ static int nv3052c_probe(struct spi_device *spi)
struct nv3052c *priv;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct nv3052c, panel, &nv3052c_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->dev = dev;
@@ -803,9 +804,6 @@ static int nv3052c_probe(struct spi_device *spi)
spi_set_drvdata(spi, priv);
- drm_panel_init(&priv->panel, dev, &nv3052c_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&priv->panel);
if (err)
return dev_err_probe(dev, err, "Failed to attach backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index 549b86f2cc28..3189d89c7ca0 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -1087,9 +1087,12 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
struct nt35510 *nt;
int ret;
- nt = devm_kzalloc(dev, sizeof(struct nt35510), GFP_KERNEL);
- if (!nt)
- return -ENOMEM;
+ nt = devm_drm_panel_alloc(dev, struct nt35510, panel,
+ &nt35510_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(nt))
+ return PTR_ERR(nt);
+
mipi_dsi_set_drvdata(dsi, nt);
nt->dev = dev;
@@ -1142,9 +1145,6 @@ static int nt35510_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(nt->reset_gpio);
}
- drm_panel_init(&nt->panel, dev, &nt35510_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
/*
* First, try to locate an external backlight (such as on GPIO)
* if this fails, assume we will want to use the internal backlight
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
index 5bbea734123b..98f0782c8411 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
@@ -456,9 +456,12 @@ static int nt35560_probe(struct mipi_dsi_device *dsi)
struct nt35560 *nt;
int ret;
- nt = devm_kzalloc(dev, sizeof(struct nt35560), GFP_KERNEL);
- if (!nt)
- return -ENOMEM;
+ nt = devm_drm_panel_alloc(dev, struct nt35560, panel,
+ &nt35560_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(nt))
+ return PTR_ERR(nt);
+
nt->video_mode = of_property_read_bool(dev->of_node,
"enforce-video-mode");
@@ -502,9 +505,6 @@ static int nt35560_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(nt->reset_gpio),
"failed to request GPIO\n");
- drm_panel_init(&nt->panel, dev, &nt35560_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
nt->panel.backlight = devm_backlight_device_register(dev, "nt35560", dev, nt,
&nt35560_bl_ops, &nt35560_bl_props);
if (IS_ERR(nt->panel.backlight))
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 08b22b592ab0..94aa6489d99f 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -449,9 +449,10 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
const struct mipi_dsi_device_info *info;
int i, num_dsis = 1, ret;
- nt = devm_kzalloc(dev, sizeof(*nt), GFP_KERNEL);
- if (!nt)
- return -ENOMEM;
+ nt = devm_drm_panel_alloc(dev, struct nt35950, panel, &nt35950_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(nt))
+ return PTR_ERR(nt);
ret = nt35950_sharp_init_vregs(nt, dev);
if (ret)
@@ -491,9 +492,6 @@ static int nt35950_probe(struct mipi_dsi_device *dsi)
nt->dsi[0] = dsi;
mipi_dsi_set_drvdata(dsi, nt);
- drm_panel_init(&nt->panel, dev, &nt35950_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&nt->panel);
if (ret) {
if (num_dsis == 2)
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 116d67bfa114..32cf64c7c18b 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -1171,9 +1171,11 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
const struct mipi_dsi_device_info *info;
int i, ret;
- pinfo = devm_kzalloc(dev, sizeof(*pinfo), GFP_KERNEL);
- if (!pinfo)
- return -ENOMEM;
+ pinfo = devm_drm_panel_alloc(dev, struct panel_info, panel,
+ &nt36523_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(pinfo))
+ return PTR_ERR(pinfo);
pinfo->vddio = devm_regulator_get(dev, "vddio");
if (IS_ERR(pinfo->vddio))
@@ -1211,7 +1213,6 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
pinfo->dsi[0] = dsi;
mipi_dsi_set_drvdata(dsi, pinfo);
- drm_panel_init(&pinfo->panel, dev, &nt36523_panel_funcs, DRM_MODE_CONNECTOR_DSI);
ret = of_drm_get_panel_orientation(dev->of_node, &pinfo->orientation);
if (ret < 0) {
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
index 8c9e04207ba9..c5e00eb55722 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36672e.c
@@ -522,9 +522,11 @@ static int nt36672e_panel_probe(struct mipi_dsi_device *dsi)
struct nt36672e_panel *ctx;
int i, ret = 0;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct nt36672e_panel, panel,
+ &nt36672e_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->desc = of_device_get_match_data(dev);
if (!ctx->desc) {
@@ -553,8 +555,6 @@ static int nt36672e_panel_probe(struct mipi_dsi_device *dsi)
dsi->format = ctx->desc->format;
dsi->mode_flags = ctx->desc->mode_flags;
- drm_panel_init(&ctx->panel, dev, &nt36672e_drm_funcs, DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index 9fa7654e2b67..a629976bae54 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -246,9 +246,10 @@ static int nt39016_probe(struct spi_device *spi)
struct nt39016 *panel;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct nt39016, drm_panel, &nt39016_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
spi_set_drvdata(spi, panel);
@@ -279,9 +280,6 @@ static int nt39016_probe(struct spi_device *spi)
return PTR_ERR(panel->map);
}
- drm_panel_init(&panel->drm_panel, dev, &nt39016_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&panel->drm_panel);
if (err)
return dev_err_probe(dev, err, "Failed to get backlight handle\n");
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index 94ae8c8270b8..66f99982f360 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -175,9 +175,11 @@ static int lcd_olinuxino_probe(struct i2c_client *client)
I2C_FUNC_SMBUS_READ_I2C_BLOCK))
return -ENODEV;
- lcd = devm_kzalloc(dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(dev, struct lcd_olinuxino, panel,
+ &lcd_olinuxino_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
i2c_set_clientdata(client, lcd);
lcd->dev = dev;
@@ -234,9 +236,6 @@ static int lcd_olinuxino_probe(struct i2c_client *client)
if (IS_ERR(lcd->enable_gpio))
return PTR_ERR(lcd->enable_gpio);
- drm_panel_init(&lcd->panel, dev, &lcd_olinuxino_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&lcd->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
index fc87f61d4400..3231e84dc66c 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-ota5601a.c
@@ -237,9 +237,11 @@ static int ota5601a_probe(struct spi_device *spi)
struct ota5601a *panel;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct ota5601a, drm_panel,
+ &ota5601a_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
spi_set_drvdata(spi, panel);
@@ -273,9 +275,6 @@ static int ota5601a_probe(struct spi_device *spi)
return PTR_ERR(panel->map);
}
- drm_panel_init(&panel->drm_panel, dev, &ota5601a_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&panel->drm_panel);
if (err) {
if (err != -EPROBE_DEFER)
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 87bbb25d119a..a0f58c3b73f6 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -424,9 +424,11 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
struct otm8009a *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct otm8009a, panel,
+ &otm8009a_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
@@ -451,9 +453,6 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev),
dev, ctx,
&otm8009a_backlight_ops,
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
index b2029e035635..2af6aa47a551 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -527,9 +527,11 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
int ret;
u32 video_mode;
- panel = devm_kzalloc(&dsi->dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct rad_panel, panel,
+ &rad_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
mipi_dsi_set_drvdata(dsi, panel);
@@ -586,8 +588,6 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi)
if (ret)
return ret;
- drm_panel_init(&panel->panel, dev, &rad_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
dev_set_drvdata(dev, panel);
drm_panel_add(&panel->panel);
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67200.c b/drivers/gpu/drm/panel/panel-raydium-rm67200.c
index 64b685dc11f6..1da8225e6f7a 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67200.c
@@ -391,9 +391,11 @@ static int raydium_rm67200_probe(struct mipi_dsi_device *dsi)
struct raydium_rm67200 *ctx;
int ret = 0;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct raydium_rm67200, panel,
+ &raydium_rm67200_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->panel_info = device_get_match_data(dev);
if (!ctx->panel_info)
@@ -421,9 +423,6 @@ static int raydium_rm67200_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_LPM;
ctx->panel.prepare_prev_first = true;
- drm_panel_init(&ctx->panel, dev, &raydium_rm67200_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index 7b7fe987e292..669b5f5c1ad9 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -327,9 +327,11 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
struct rm68200 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct rm68200, panel,
+ &rm68200_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio)) {
@@ -355,9 +357,6 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm692e5.c b/drivers/gpu/drm/panel/panel-raydium-rm692e5.c
index ea1b728e85a2..8e9484768657 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm692e5.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm692e5.c
@@ -281,9 +281,11 @@ static int rm692e5_probe(struct mipi_dsi_device *dsi)
struct rm692e5_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct rm692e5_panel, panel,
+ &rm692e5_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vddio";
ctx->supplies[1].supply = "dvdd";
@@ -306,8 +308,6 @@ static int rm692e5_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &rm692e5_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = rm692e5_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm69380.c b/drivers/gpu/drm/panel/panel-raydium-rm69380.c
index d3071c01aaea..86769cadec97 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm69380.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm69380.c
@@ -208,9 +208,11 @@ static int rm69380_probe(struct mipi_dsi_device *dsi)
struct device_node *dsi_sec;
int ret, i;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct rm69380_panel, panel,
+ &rm69380_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vddio";
ctx->supplies[1].supply = "avdd";
@@ -248,8 +250,6 @@ static int rm69380_probe(struct mipi_dsi_device *dsi)
ctx->dsi[0] = dsi;
mipi_dsi_set_drvdata(dsi, ctx);
- drm_panel_init(&ctx->panel, dev, &rm69380_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = rm69380_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-renesas-r61307.c b/drivers/gpu/drm/panel/panel-renesas-r61307.c
new file mode 100644
index 000000000000..319415194839
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-renesas-r61307.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define R61307_MACP 0xb0 /* Manufacturer CMD Protect */
+#define R61307_MACP_ON 0x03
+#define R61307_MACP_OFF 0x04
+
+#define R61307_INVERSION 0xc1
+#define R61307_GAMMA_SET_A 0xc8 /* Gamma Setting A */
+#define R61307_GAMMA_SET_B 0xc9 /* Gamma Setting B */
+#define R61307_GAMMA_SET_C 0xca /* Gamma Setting C */
+#define R61307_CONTRAST_SET 0xcc
+
+struct renesas_r61307 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct regulator *vcc_supply;
+ struct regulator *iovcc_supply;
+
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+
+ bool dig_cont_adj;
+ bool inversion;
+ u32 gamma;
+};
+
+static const u8 gamma_setting[][25] = {
+ { /* sentinel */ },
+ {
+ R61307_GAMMA_SET_A,
+ 0x00, 0x06, 0x0a, 0x0f,
+ 0x14, 0x1f, 0x1f, 0x17,
+ 0x12, 0x0c, 0x09, 0x06,
+ 0x00, 0x06, 0x0a, 0x0f,
+ 0x14, 0x1f, 0x1f, 0x17,
+ 0x12, 0x0c, 0x09, 0x06
+ },
+ {
+ R61307_GAMMA_SET_A,
+ 0x00, 0x05, 0x0b, 0x0f,
+ 0x11, 0x1d, 0x20, 0x18,
+ 0x18, 0x09, 0x07, 0x06,
+ 0x00, 0x05, 0x0b, 0x0f,
+ 0x11, 0x1d, 0x20, 0x18,
+ 0x18, 0x09, 0x07, 0x06
+ },
+ {
+ R61307_GAMMA_SET_A,
+ 0x0b, 0x0d, 0x10, 0x14,
+ 0x13, 0x1d, 0x20, 0x18,
+ 0x12, 0x09, 0x07, 0x06,
+ 0x0a, 0x0c, 0x10, 0x14,
+ 0x13, 0x1d, 0x20, 0x18,
+ 0x12, 0x09, 0x07, 0x06
+ },
+};
+
+static inline struct renesas_r61307 *to_renesas_r61307(struct drm_panel *panel)
+{
+ return container_of(panel, struct renesas_r61307, panel);
+}
+
+static void renesas_r61307_reset(struct renesas_r61307 *priv)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(2000, 3000);
+}
+
+static int renesas_r61307_prepare(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+ struct device *dev = &priv->dsi->dev;
+ int ret;
+
+ if (priv->prepared)
+ return 0;
+
+ ret = regulator_enable(priv->vcc_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable vcc power supply\n");
+ return ret;
+ }
+
+ usleep_range(2000, 3000);
+
+ ret = regulator_enable(priv->iovcc_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable iovcc power supply\n");
+ return ret;
+ }
+
+ usleep_range(2000, 3000);
+
+ renesas_r61307_reset(priv);
+
+ priv->prepared = true;
+ return 0;
+}
+
+static int renesas_r61307_enable(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 80);
+
+ mipi_dsi_dcs_write_seq_multi(&ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_msleep(&ctx, 20);
+
+ mipi_dsi_dcs_set_pixel_format_multi(&ctx, MIPI_DCS_PIXEL_FMT_24BIT << 4);
+
+ /* MACP Off */
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_MACP, R61307_MACP_OFF);
+
+ if (priv->dig_cont_adj)
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_CONTRAST_SET,
+ 0xdc, 0xb4, 0xff);
+
+ if (priv->gamma)
+ mipi_dsi_generic_write_multi(&ctx, gamma_setting[priv->gamma],
+ sizeof(gamma_setting[priv->gamma]));
+
+ if (priv->inversion)
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_INVERSION,
+ 0x00, 0x50, 0x03, 0x22,
+ 0x16, 0x06, 0x60, 0x11);
+ else
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_INVERSION,
+ 0x00, 0x10, 0x03, 0x22,
+ 0x16, 0x06, 0x60, 0x01);
+
+ /* MACP On */
+ mipi_dsi_generic_write_seq_multi(&ctx, R61307_MACP, R61307_MACP_ON);
+
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 50);
+
+ return 0;
+}
+
+static int renesas_r61307_disable(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 100);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+
+ return 0;
+}
+
+static int renesas_r61307_unprepare(struct drm_panel *panel)
+{
+ struct renesas_r61307 *priv = to_renesas_r61307(panel);
+
+ if (!priv->prepared)
+ return 0;
+
+ usleep_range(10000, 11000);
+
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(5000, 6000);
+
+ regulator_disable(priv->iovcc_supply);
+ usleep_range(2000, 3000);
+ regulator_disable(priv->vcc_supply);
+
+ priv->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode renesas_r61307_mode = {
+ .clock = (768 + 116 + 81 + 5) * (1024 + 24 + 8 + 2) * 60 / 1000,
+ .hdisplay = 768,
+ .hsync_start = 768 + 116,
+ .hsync_end = 768 + 116 + 81,
+ .htotal = 768 + 116 + 81 + 5,
+ .vdisplay = 1024,
+ .vsync_start = 1024 + 24,
+ .vsync_end = 1024 + 24 + 8,
+ .vtotal = 1024 + 24 + 8 + 2,
+ .width_mm = 76,
+ .height_mm = 101,
+};
+
+static int renesas_r61307_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &renesas_r61307_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs renesas_r61307_panel_funcs = {
+ .prepare = renesas_r61307_prepare,
+ .enable = renesas_r61307_enable,
+ .disable = renesas_r61307_disable,
+ .unprepare = renesas_r61307_unprepare,
+ .get_modes = renesas_r61307_get_modes,
+};
+
+static int renesas_r61307_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct renesas_r61307 *priv;
+ int ret;
+
+ priv = devm_drm_panel_alloc(dev, struct renesas_r61307, panel,
+ &renesas_r61307_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ priv->vcc_supply = devm_regulator_get(dev, "vcc");
+ if (IS_ERR(priv->vcc_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->vcc_supply),
+ "Failed to get vcc-supply\n");
+
+ priv->iovcc_supply = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(priv->iovcc_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->iovcc_supply),
+ "Failed to get iovcc-supply\n");
+
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "Failed to get reset gpios\n");
+
+ if (device_property_read_bool(dev, "renesas,inversion"))
+ priv->inversion = true;
+
+ if (device_property_read_bool(dev, "renesas,contrast"))
+ priv->dig_cont_adj = true;
+
+ priv->gamma = 0;
+ device_property_read_u32(dev, "renesas,gamma", &priv->gamma);
+
+ priv->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, priv);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ ret = drm_panel_of_backlight(&priv->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&priv->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ drm_panel_remove(&priv->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void renesas_r61307_remove(struct mipi_dsi_device *dsi)
+{
+ struct renesas_r61307 *priv = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&priv->panel);
+}
+
+static const struct of_device_id renesas_r61307_of_match[] = {
+ { .compatible = "hit,tx13d100vm0eaa" },
+ { .compatible = "koe,tx13d100vm0eaa" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, renesas_r61307_of_match);
+
+static struct mipi_dsi_driver renesas_r61307_driver = {
+ .probe = renesas_r61307_probe,
+ .remove = renesas_r61307_remove,
+ .driver = {
+ .name = "panel-renesas-r61307",
+ .of_match_table = renesas_r61307_of_match,
+ },
+};
+module_mipi_dsi_driver(renesas_r61307_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Renesas R61307-based panel driver");
+MODULE_LICENSE("GPL");
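
Both new Renesas drivers issue their command sequences through struct mipi_dsi_multi_context rather than checking each DCS/generic write individually: the *_multi() helpers record the first failure in the context's accum_err field and turn later helper calls into no-ops, which is why the enable()/disable() paths above can simply return 0. A minimal sketch of the same pattern where the accumulated error is propagated instead; example_panel and to_example_panel() are placeholder names:

struct example_panel {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
};

static inline struct example_panel *to_example_panel(struct drm_panel *panel)
{
	return container_of(panel, struct example_panel, panel);
}

static int example_panel_enable(struct drm_panel *panel)
{
	struct example_panel *priv = to_example_panel(panel);
	struct mipi_dsi_multi_context dsi_ctx = { .dsi = priv->dsi };

	mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
	mipi_dsi_msleep(&dsi_ctx, 80);
	/* later _multi calls are skipped once an error has been recorded */
	mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);

	/* 0 on success, otherwise the first error recorded by the helpers */
	return dsi_ctx.accum_err;
}
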
diff --git a/drivers/gpu/drm/panel/panel-renesas-r69328.c b/drivers/gpu/drm/panel/panel-renesas-r69328.c
new file mode 100644
index 000000000000..46287ab04c30
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-renesas-r69328.c
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#define R69328_MACP 0xb0 /* Manufacturer Access CMD Protect */
+#define R69328_MACP_ON 0x03
+#define R69328_MACP_OFF 0x04
+
+#define R69328_GAMMA_SET_A 0xc8 /* Gamma Setting A */
+#define R69328_GAMMA_SET_B 0xc9 /* Gamma Setting B */
+#define R69328_GAMMA_SET_C 0xca /* Gamma Setting C */
+
+#define R69328_POWER_SET 0xd1
+
+struct renesas_r69328 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct regulator *vdd_supply;
+ struct regulator *vddio_supply;
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+};
+
+static inline struct renesas_r69328 *to_renesas_r69328(struct drm_panel *panel)
+{
+ return container_of(panel, struct renesas_r69328, panel);
+}
+
+static void renesas_r69328_reset(struct renesas_r69328 *priv)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(2000, 3000);
+}
+
+static int renesas_r69328_prepare(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+ struct device *dev = &priv->dsi->dev;
+ int ret;
+
+ if (priv->prepared)
+ return 0;
+
+ ret = regulator_enable(priv->vdd_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable vdd power supply\n");
+ return ret;
+ }
+
+ usleep_range(10000, 11000);
+
+ ret = regulator_enable(priv->vddio_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable vddio power supply\n");
+ regulator_disable(priv->vdd_supply);
+ return ret;
+ }
+
+ usleep_range(10000, 11000);
+
+ renesas_r69328_reset(priv);
+
+ priv->prepared = true;
+ return 0;
+}
+
+static int renesas_r69328_enable(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ /* Set address mode */
+ mipi_dsi_dcs_write_seq_multi(&ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
+ mipi_dsi_dcs_set_pixel_format_multi(&ctx, MIPI_DCS_PIXEL_FMT_24BIT << 4);
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+
+ mipi_dsi_msleep(&ctx, 100);
+
+ /* MACP Off */
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_MACP, R69328_MACP_OFF);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_POWER_SET, 0x14, 0x1d,
+ 0x21, 0x67, 0x11, 0x9a);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_GAMMA_SET_A, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_GAMMA_SET_B, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_GAMMA_SET_C, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00, 0x00, 0x1a,
+ 0x20, 0x28, 0x25, 0x24, 0x26, 0x15, 0x13,
+ 0x11, 0x18, 0x1e, 0x1c, 0x00);
+
+ /* MACP On */
+ mipi_dsi_generic_write_seq_multi(&ctx, R69328_MACP, R69328_MACP_ON);
+
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 50);
+
+ return 0;
+}
+
+static int renesas_r69328_disable(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 60);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+
+ return 0;
+}
+
+static int renesas_r69328_unprepare(struct drm_panel *panel)
+{
+ struct renesas_r69328 *priv = to_renesas_r69328(panel);
+
+ if (!priv->prepared)
+ return 0;
+
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+
+ usleep_range(5000, 6000);
+
+ regulator_disable(priv->vddio_supply);
+ regulator_disable(priv->vdd_supply);
+
+ priv->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode renesas_r69328_mode = {
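+ /* 720x1280 @ 60 Hz; .clock is htotal * vtotal * 60 / 1000 (kHz) */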
+ .clock = (720 + 92 + 62 + 4) * (1280 + 6 + 3 + 1) * 60 / 1000,
+ .hdisplay = 720,
+ .hsync_start = 720 + 92,
+ .hsync_end = 720 + 92 + 62,
+ .htotal = 720 + 92 + 62 + 4,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 6,
+ .vsync_end = 1280 + 6 + 3,
+ .vtotal = 1280 + 6 + 3 + 1,
+ .width_mm = 59,
+ .height_mm = 105,
+};
+
+static int renesas_r69328_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &renesas_r69328_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs renesas_r69328_panel_funcs = {
+ .prepare = renesas_r69328_prepare,
+ .enable = renesas_r69328_enable,
+ .disable = renesas_r69328_disable,
+ .unprepare = renesas_r69328_unprepare,
+ .get_modes = renesas_r69328_get_modes,
+};
+
+static int renesas_r69328_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct renesas_r69328 *priv;
+ int ret;
+
+ priv = devm_drm_panel_alloc(dev, struct renesas_r69328, panel,
+ &renesas_r69328_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ priv->vdd_supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(priv->vdd_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->vdd_supply),
+ "Failed to get vdd-supply\n");
+
+ priv->vddio_supply = devm_regulator_get(dev, "vddio");
+ if (IS_ERR(priv->vddio_supply))
+ return dev_err_probe(dev, PTR_ERR(priv->vddio_supply),
+ "Failed to get vddio-supply\n");
+
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ priv->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, priv);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ ret = drm_panel_of_backlight(&priv->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&priv->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret) {
+ drm_panel_remove(&priv->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void renesas_r69328_remove(struct mipi_dsi_device *dsi)
+{
+ struct renesas_r69328 *priv = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&priv->panel);
+}
+
+static const struct of_device_id renesas_r69328_of_match[] = {
+ { .compatible = "jdi,dx12d100vm0eaa" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, renesas_r69328_of_match);
+
+static struct mipi_dsi_driver renesas_r69328_driver = {
+ .probe = renesas_r69328_probe,
+ .remove = renesas_r69328_remove,
+ .driver = {
+ .name = "panel-renesas-r69328",
+ .of_match_table = renesas_r69328_of_match,
+ },
+};
+module_mipi_dsi_driver(renesas_r69328_driver);
+
+MODULE_AUTHOR("Maxim Schwalm <maxim.schwalm@gmail.com>");
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Renesas R69328-based panel driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index 2ef5ea5eaeeb..ad35d0fb0a16 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -143,9 +143,11 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
struct rb070d30_panel *ctx;
int ret;
- ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct rb070d30_panel, panel,
+ &rb070d30_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supply = devm_regulator_get(&dsi->dev, "vcc-lcd");
if (IS_ERR(ctx->supply))
@@ -154,9 +156,6 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- drm_panel_init(&ctx->panel, &dsi->dev, &rb070d30_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->gpios.reset)) {
dev_err(&dsi->dev, "Couldn't get our reset GPIO\n");
diff --git a/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c
index cf6186312252..188dd7cf0297 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ams581vf01.c
@@ -211,9 +211,11 @@ static int ams581vf01_probe(struct mipi_dsi_device *dsi)
struct ams581vf01 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(&dsi->dev, struct ams581vf01, panel,
+ &ams581vf01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(ams581vf01_supplies),
@@ -235,8 +237,6 @@ static int ams581vf01_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &ams581vf01_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = ams581vf01_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c b/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c
index 817365cb5e46..f8ebbd4a530b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ams639rq08.c
@@ -257,9 +257,11 @@ static int ams639rq08_probe(struct mipi_dsi_device *dsi)
struct ams639rq08 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ams639rq08, panel,
+ &ams639rq08_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(ams639rq08_supplies),
@@ -281,8 +283,6 @@ static int ams639rq08_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &ams639rq08_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = ams639rq08_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
index 9a482a744b8c..20ec27d2d6c2 100644
--- a/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
+++ b/drivers/gpu/drm/panel/panel-samsung-atna33xc20.c
@@ -266,9 +266,12 @@ static int atana33xc20_probe(struct dp_aux_ep_device *aux_ep)
struct device *dev = &aux_ep->dev;
int ret;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct atana33xc20_panel, base,
+ &atana33xc20_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+
dev_set_drvdata(dev, panel);
panel->aux = aux_ep->aux;
@@ -301,8 +304,6 @@ static int atana33xc20_probe(struct dp_aux_ep_device *aux_ep)
if (ret)
return ret;
- drm_panel_init(&panel->base, dev, &atana33xc20_funcs, DRM_MODE_CONNECTOR_eDP);
-
pm_runtime_get_sync(dev);
ret = drm_panel_dp_aux_backlight(&panel->base, aux_ep->aux);
pm_runtime_mark_last_busy(dev);
diff --git a/drivers/gpu/drm/panel/panel-samsung-db7430.c b/drivers/gpu/drm/panel/panel-samsung-db7430.c
index 14c6700e37b3..a97182f3c990 100644
--- a/drivers/gpu/drm/panel/panel-samsung-db7430.c
+++ b/drivers/gpu/drm/panel/panel-samsung-db7430.c
@@ -267,9 +267,11 @@ static int db7430_probe(struct spi_device *spi)
struct db7430 *db;
int ret;
- db = devm_kzalloc(dev, sizeof(*db), GFP_KERNEL);
- if (!db)
- return -ENOMEM;
+ db = devm_drm_panel_alloc(dev, struct db7430, panel, &db7430_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(db))
+ return PTR_ERR(db);
+
db->dev = dev;
/*
@@ -294,9 +296,6 @@ static int db7430_probe(struct spi_device *spi)
if (ret)
return dev_err_probe(dev, ret, "MIPI DBI init failed\n");
- drm_panel_init(&db->panel, dev, &db7430_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
/* FIXME: if no external backlight, use internal backlight */
ret = drm_panel_of_backlight(&db->panel);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 9f438683a6f6..c7f2241523a0 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -339,9 +339,11 @@ static int ld9040_probe(struct spi_device *spi)
struct ld9040 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct ld9040), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct ld9040, panel,
+ &ld9040_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
spi_set_drvdata(spi, ctx);
@@ -373,9 +375,6 @@ static int ld9040_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
bldev = devm_backlight_device_register(dev, dev_name(dev), dev,
ctx, &ld9040_bl_ops,
&ld9040_bl_props);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index 79f611963c61..ba1a02000bb9 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -166,9 +166,11 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
struct s6d16d0 *s6;
int ret;
- s6 = devm_kzalloc(dev, sizeof(struct s6d16d0), GFP_KERNEL);
- if (!s6)
- return -ENOMEM;
+ s6 = devm_drm_panel_alloc(dev, struct s6d16d0, panel,
+ &s6d16d0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(s6))
+ return PTR_ERR(s6);
mipi_dsi_set_drvdata(dsi, s6);
s6->dev = dev;
@@ -200,9 +202,6 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi)
return ret;
}
- drm_panel_init(&s6->panel, dev, &s6d16d0_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&s6->panel);
ret = mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
index 2adb223a895c..300dc19bd9d1 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
@@ -247,9 +247,11 @@ static int s6d27a1_probe(struct spi_device *spi)
struct s6d27a1 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6d27a1, panel,
+ &s6d27a1_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->dev = dev;
@@ -277,9 +279,6 @@ static int s6d27a1_probe(struct spi_device *spi)
ctx->dbi.read_commands = s6d27a1_dbi_read_commands;
- drm_panel_init(&ctx->panel, dev, &s6d27a1_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "failed to add backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
index 93f11e2e9398..b5b9e80690f6 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
@@ -392,9 +392,11 @@ static int s6d7aa0_probe(struct mipi_dsi_device *dsi)
struct s6d7aa0 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6d7aa0, panel,
+ &s6d7aa0_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->desc = of_device_get_match_data(dev);
if (!ctx->desc)
@@ -420,8 +422,6 @@ static int s6d7aa0_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
| ctx->desc->mode_flags;
- drm_panel_init(&ctx->panel, dev, &s6d7aa0_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ret = drm_panel_of_backlight(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
index 27a059b55ae5..f4d75eca3cdf 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c
@@ -185,9 +185,11 @@ static int s6e3fa7_panel_probe(struct mipi_dsi_device *dsi)
struct s6e3fa7_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e3fa7_panel, panel,
+ &s6e3fa7_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(ctx->reset_gpio))
@@ -202,8 +204,6 @@ static int s6e3fa7_panel_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
- drm_panel_init(&ctx->panel, dev, &s6e3fa7_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = s6e3fa7_panel_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index ab8b58545284..1db0c63b1131 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -681,9 +681,11 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
struct s6e3ha2 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e3ha2, panel,
+ &s6e3ha2_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -731,8 +733,6 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS;
ctx->bl_dev->props.power = BACKLIGHT_POWER_OFF;
- drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
index 64c6f7d45bed..550e9ef9bb71 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha8.c
@@ -253,9 +253,11 @@ static int s6e3ha8_amb577px01_wqhd_probe(struct mipi_dsi_device *dsi)
struct s6e3ha8 *priv;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_panel_alloc(dev, struct s6e3ha8, panel,
+ &s6e3ha8_amb577px01_wqhd_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(s6e3ha8_supplies),
s6e3ha8_supplies,
@@ -279,8 +281,6 @@ static int s6e3ha8_amb577px01_wqhd_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&priv->panel, dev, &s6e3ha8_amb577px01_wqhd_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
priv->panel.prepare_prev_first = true;
drm_panel_add(&priv->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index 364f1c9a16d9..6f3d39556f92 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -437,9 +437,11 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
struct s6e63j0x03 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct s6e63j0x03), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e63j0x03, panel,
+ &s6e63j0x03_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -462,8 +464,6 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
"cannot get reset-gpio\n");
- drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 6917ffda5b2b..ea241c89593b 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -13,6 +13,7 @@
#include <linux/backlight.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/property.h>
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
index e92e95158d1f..e91f50662997 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams427ap24.c
@@ -687,9 +687,11 @@ static int s6e88a0_ams427ap24_probe(struct mipi_dsi_device *dsi)
struct s6e88a0_ams427ap24 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e88a0_ams427ap24, panel,
+ &s6e88a0_ams427ap24_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(dev,
ARRAY_SIZE(s6e88a0_ams427ap24_supplies),
@@ -711,8 +713,6 @@ static int s6e88a0_ams427ap24_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_NO_EOT_PACKET | MIPI_DSI_MODE_VIDEO_NO_HFP;
- drm_panel_init(&ctx->panel, dev, &s6e88a0_ams427ap24_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->flip_horizontal = device_property_read_bool(dev, "flip-horizontal");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
index 57b1a899bbdc..ca5cad41ff1d 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -165,9 +165,11 @@ static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
struct s6e88a0_ams452ef01 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e88a0_ams452ef01, panel,
+ &s6e88a0_ams452ef01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "vdd3";
ctx->supplies[1].supply = "vci";
@@ -192,9 +194,6 @@ static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
- drm_panel_init(&ctx->panel, dev, &s6e88a0_ams452ef01_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
drm_panel_add(&ctx->panel);
ret = mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
index c51d07ec1529..897df195f2f3 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c
@@ -979,9 +979,11 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
struct s6e8aa0 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(struct s6e8aa0), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct s6e8aa0, panel,
+ &s6e8aa0_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
mipi_dsi_set_drvdata(dsi, ctx);
@@ -1014,8 +1016,6 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
ctx->brightness = GAMMA_LEVEL_NUM - 1;
- drm_panel_init(&ctx->panel, dev, &s6e8aa0_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
drm_panel_add(&ctx->panel);
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index d92ae6b6100f..064258217d50 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -191,9 +191,11 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
struct sofef00_panel *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct sofef00_panel, panel,
+ &sofef00_panel_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supply = devm_regulator_get(dev, "vddio");
if (IS_ERR(ctx->supply))
@@ -211,9 +213,6 @@ static int sofef00_panel_probe(struct mipi_dsi_device *dsi)
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
- drm_panel_init(&ctx->panel, dev, &sofef00_panel_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = sofef00_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 7d1b421ea9dd..0935d83ee2db 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -204,9 +204,11 @@ static int seiko_panel_probe(struct device *dev,
struct seiko_panel *panel;
int err;
- panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct seiko_panel, base,
+ &seiko_panel_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
panel->desc = desc;
@@ -224,9 +226,6 @@ static int seiko_panel_probe(struct device *dev,
return dev_err_probe(dev, PTR_ERR(panel->enable_gpio),
"failed to request GPIO\n");
- drm_panel_init(&panel->base, dev, &seiko_panel_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
err = drm_panel_of_backlight(&panel->base);
if (err)
return err;
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
index a9673a52b861..938beac4655d 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -138,9 +138,10 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
{
struct ls037v7dw01_panel *lcd;
- lcd = devm_kzalloc(&pdev->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&pdev->dev, struct ls037v7dw01_panel, panel,
+ &ls037v7dw01_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
platform_set_drvdata(pdev, lcd);
lcd->pdev = pdev;
@@ -181,9 +182,6 @@ static int ls037v7dw01_probe(struct platform_device *pdev)
return PTR_ERR(lcd->ud_gpio);
}
- drm_panel_init(&lcd->panel, &pdev->dev, &ls037v7dw01_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
index 0b4e0983639b..0456f3d705e7 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
@@ -193,9 +193,11 @@ static int sharp_ls060_probe(struct mipi_dsi_device *dsi)
struct sharp_ls060 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct sharp_ls060, panel,
+ &sharp_ls060_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->vddi_supply = devm_regulator_get(dev, "vddi");
if (IS_ERR(ctx->vddi_supply))
@@ -227,9 +229,6 @@ static int sharp_ls060_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &sharp_ls060_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 0a3b26bb4d73..47222d2d8129 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1268,6 +1268,30 @@ static const struct panel_desc auo_g190ean01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
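+/* AUO P238HAN01: 23.8" 1920x1080 LVDS panel */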
+static const struct display_timing auo_p238han01_timings = {
+ .pixelclock = { 107400000, 142400000, 180000000 },
+ .hactive = { 1920, 1920, 1920 },
+ .hfront_porch = { 30, 70, 650 },
+ .hback_porch = { 30, 70, 650 },
+ .hsync_len = { 20, 40, 136 },
+ .vactive = { 1080, 1080, 1080 },
+ .vfront_porch = { 5, 19, 318 },
+ .vback_porch = { 5, 19, 318 },
+ .vsync_len = { 4, 12, 120 },
+};
+
+static const struct panel_desc auo_p238han01 = {
+ .timings = &auo_p238han01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 527,
+ .height = 296,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing auo_p320hvn03_timings = {
.pixelclock = { 106000000, 148500000, 164000000 },
.hactive = { 1920, 1920, 1920 },
@@ -4939,6 +4963,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,g190ean01",
.data = &auo_g190ean01,
}, {
+ .compatible = "auo,p238han01",
+ .data = &auo_p238han01,
+ }, {
.compatible = "auo,p320hvn03",
.data = &auo_p320hvn03,
}, {
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 1f72ef7ca74c..2f79ec4a2063 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -520,6 +520,28 @@ static void rg28xx_gip_sequence(struct st7701 *st7701)
st7701_switch_cmd_bkx(st7701, false, 0);
}
+static void wf40eswaa6mnn0_gip_sequence(struct st7701 *st7701)
+{
+ ST7701_WRITE(st7701, 0xE0, 0x00, 0x28, 0x02);
+ ST7701_WRITE(st7701, 0xE1, 0x08, 0xA0, 0x00, 0x00, 0x07, 0xA0, 0x00,
+ 0x00, 0x00, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE2, 0x11, 0x11, 0x44, 0x44, 0xED, 0xA0, 0x00,
+ 0x00, 0xEC, 0xA0, 0x00, 0x00);
+ ST7701_WRITE(st7701, 0xE3, 0x00, 0x00, 0x11, 0x11);
+ ST7701_WRITE(st7701, 0xE4, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE5, 0x0A, 0xE9, 0xD8, 0xA0, 0x0C, 0xEB, 0xD8,
+ 0xA0, 0x0E, 0xED, 0xD8, 0xA0, 0x10, 0xEF, 0xD8, 0xA0);
+ ST7701_WRITE(st7701, 0xE6, 0x00, 0x00, 0x11, 0x11);
+ ST7701_WRITE(st7701, 0xE7, 0x44, 0x44);
+ ST7701_WRITE(st7701, 0xE8, 0x09, 0xE8, 0xD8, 0xA0, 0x0B, 0xEA, 0xD8,
+ 0xA0, 0x0D, 0xEC, 0xD8, 0xA0, 0x0F, 0xEE, 0xD8, 0xA0);
+ ST7701_WRITE(st7701, 0xEB, 0x00, 0x00, 0xE4, 0xE4, 0x88, 0x00, 0x40);
+ ST7701_WRITE(st7701, 0xEC, 0x3C, 0x00);
+ ST7701_WRITE(st7701, 0xED, 0xAB, 0x89, 0x76, 0x54, 0x02, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x20, 0x45, 0x67, 0x98, 0xBA);
+ ST7701_WRITE(st7701, MIPI_DCS_SET_ADDRESS_MODE, 0);
+}
+
static int st7701_prepare(struct drm_panel *panel)
{
struct st7701 *st7701 = panel_to_st7701(panel);
@@ -1135,6 +1157,107 @@ static const struct st7701_panel_desc rg28xx_desc = {
.gip_sequence = rg28xx_gip_sequence,
};
+static const struct drm_display_mode wf40eswaa6mnn0_mode = {
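+ /* 480x480 @ 60 Hz: 18.306 MHz pixel clock / (540 * 565) */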
+ .clock = 18306,
+
+ .hdisplay = 480,
+ .hsync_start = 480 + 2,
+ .hsync_end = 480 + 2 + 45,
+ .htotal = 480 + 2 + 45 + 13,
+
+ .vdisplay = 480,
+ .vsync_start = 480 + 2,
+ .vsync_end = 480 + 2 + 70,
+ .vtotal = 480 + 2 + 70 + 13,
+
+ .width_mm = 72,
+ .height_mm = 70,
+
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct st7701_panel_desc wf40eswaa6mnn0_desc = {
+ .mode = &wf40eswaa6mnn0_mode,
+ .lanes = 2,
+ .format = MIPI_DSI_FMT_RGB888,
+ .panel_sleep_delay = 0,
+
+ .pv_gamma = {
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC0_MASK, 0x1),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x08),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0c),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x08),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x10),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x0c),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x08),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x22),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x04),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0x14),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x12),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0xb3),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x3a),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x1f)
+ },
+ .nv_gamma = {
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x13),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC4_MASK, 0x19),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC8_MASK, 0x1f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC16_MASK, 0x0f),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC24_MASK, 0x14),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC52_MASK, 0x07),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC80_MASK, 0x07),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC108_MASK, 0x08),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC147_MASK, 0x07),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC175_MASK, 0x22),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC203_MASK, 0x02),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC231_MASK, 0xf),
+
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC239_MASK, 0x0f),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC247_MASK, 0xa3),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC251_MASK, 0x29),
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_AJ_MASK, 0) |
+ CFIELD_PREP(ST7701_CMD2_BK0_GAMCTRL_VC255_MASK, 0x0d)
+ },
+ .nlinv = 3,
+ .vop_uv = 4737500,
+ .vcom_uv = 662500,
+ .vgh_mv = 15000,
+ .vgl_mv = -10170,
+ .avdd_mv = 6600,
+ .avcl_mv = -4600,
+ .gamma_op_bias = OP_BIAS_MIDDLE,
+ .input_op_bias = OP_BIAS_MIDDLE,
+ .output_op_bias = OP_BIAS_MIN,
+ .t2d_ns = 1600,
+ .t3d_ns = 10400,
+ .eot_en = true,
+ .gip_sequence = wf40eswaa6mnn0_gip_sequence,
+};
+
static void st7701_cleanup(void *data)
{
struct st7701 *st7701 = (struct st7701 *)data;
@@ -1150,9 +1273,10 @@ static int st7701_probe(struct device *dev, int connector_type)
struct st7701 *st7701;
int ret;
- st7701 = devm_kzalloc(dev, sizeof(*st7701), GFP_KERNEL);
- if (!st7701)
- return -ENOMEM;
+ st7701 = devm_drm_panel_alloc(dev, struct st7701, panel, &st7701_funcs,
+ connector_type);
+ if (IS_ERR(st7701))
+ return PTR_ERR(st7701);
desc = of_device_get_match_data(dev);
if (!desc)
@@ -1176,7 +1300,6 @@ static int st7701_probe(struct device *dev, int connector_type)
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to get orientation\n");
- drm_panel_init(&st7701->panel, dev, &st7701_funcs, connector_type);
st7701->panel.prepare_prev_first = true;
/**
@@ -1265,6 +1388,7 @@ static const struct of_device_id st7701_dsi_of_match[] = {
{ .compatible = "densitron,dmt028vghmcmi-1a", .data = &dmt028vghmcmi_1a_desc },
{ .compatible = "elida,kd50t048a", .data = &kd50t048a_desc },
{ .compatible = "techstar,ts8550b", .data = &ts8550b_desc },
+ { .compatible = "winstar,wf40eswaa6mnn0", .data = &wf40eswaa6mnn0_desc },
{ }
};
MODULE_DEVICE_TABLE(of, st7701_dsi_of_match);
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
index 67e8e45498cb..1a007a244d84 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c
@@ -846,9 +846,11 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
struct st7703 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct st7703, panel,
+ &st7703_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio))
@@ -876,9 +878,6 @@ static int st7703_probe(struct mipi_dsi_device *dsi)
if (ret < 0)
return dev_err_probe(&dsi->dev, ret, "Failed to get orientation\n");
- drm_panel_init(&ctx->panel, dev, &st7703_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index 28bfc48a9127..04d91929eedd 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -612,9 +612,10 @@ static int st7789v_probe(struct spi_device *spi)
struct st7789v *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct st7789v, panel,
+ &st7789v_drm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
spi_set_drvdata(spi, ctx);
ctx->spi = spi;
@@ -626,9 +627,6 @@ static int st7789v_probe(struct spi_device *spi)
ctx->info = device_get_match_data(&spi->dev);
- drm_panel_init(&ctx->panel, dev, &st7789v_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ctx->power = devm_regulator_get(dev, "power");
ret = PTR_ERR_OR_ZERO(ctx->power);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index d437f5c84f5f..fe043de791b0 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -607,9 +607,10 @@ static int acx565akm_probe(struct spi_device *spi)
struct acx565akm_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct acx565akm_panel, panel,
+ &acx565akm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
spi->mode = SPI_MODE_3;
@@ -635,9 +636,6 @@ static int acx565akm_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &acx565akm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
index 97f4bb4e1029..7c989b70ab51 100644
--- a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+++ b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
@@ -175,9 +175,11 @@ static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi)
struct sony_td4353_jdi *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct sony_td4353_jdi, panel,
+ &sony_td4353_jdi_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->type = (uintptr_t)of_device_get_match_data(dev);
@@ -206,9 +208,6 @@ static int sony_td4353_jdi_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &sony_td4353_jdi_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return dev_err_probe(dev, ret, "Failed to get backlight\n");
diff --git a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
index 104b2290560e..216a6ad8696e 100644
--- a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
+++ b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
@@ -433,9 +433,11 @@ static int truly_nt35521_probe(struct mipi_dsi_device *dsi)
struct truly_nt35521 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct truly_nt35521, panel,
+ &truly_nt35521_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->supplies[0].supply = "positive5";
ctx->supplies[1].supply = "negative5";
@@ -465,9 +467,6 @@ static int truly_nt35521_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &truly_nt35521_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = truly_nt35521_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-summit.c b/drivers/gpu/drm/panel/panel-summit.c
index e780faee1857..4854437e2899 100644
--- a/drivers/gpu/drm/panel/panel-summit.c
+++ b/drivers/gpu/drm/panel/panel-summit.c
@@ -68,9 +68,11 @@ static int summit_probe(struct mipi_dsi_device *dsi)
struct summit_data *s_data;
int ret;
- s_data = devm_kzalloc(dev, sizeof(*s_data), GFP_KERNEL);
- if (!s_data)
- return -ENOMEM;
+ s_data = devm_drm_panel_alloc(dev, struct summit_data, panel,
+ &summit_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(s_data))
+ return PTR_ERR(s_data);
mipi_dsi_set_drvdata(dsi, s_data);
s_data->dsi = dsi;
@@ -85,8 +87,6 @@ static int summit_probe(struct mipi_dsi_device *dsi)
if (IS_ERR(s_data->bl))
return PTR_ERR(s_data->bl);
- drm_panel_init(&s_data->panel, dev, &summit_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&s_data->panel);
return mipi_dsi_attach(dsi);
diff --git a/drivers/gpu/drm/panel/panel-synaptics-r63353.c b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
index b148e6cba9bd..3a74d48753d9 100644
--- a/drivers/gpu/drm/panel/panel-synaptics-r63353.c
+++ b/drivers/gpu/drm/panel/panel-synaptics-r63353.c
@@ -229,9 +229,11 @@ static int r63353_panel_probe(struct mipi_dsi_device *dsi)
struct device *dev = &dsi->dev;
struct r63353_panel *panel;
- panel = devm_kzalloc(&dsi->dev, sizeof(*panel), GFP_KERNEL);
- if (!panel)
- return -ENOMEM;
+ panel = devm_drm_panel_alloc(dev, struct r63353_panel, base,
+ &r63353_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
mipi_dsi_set_drvdata(dsi, panel);
panel->dsi = dsi;
@@ -258,9 +260,6 @@ static int r63353_panel_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(panel->reset_gpio);
}
- drm_panel_init(&panel->base, dev, &r63353_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
panel->base.prepare_prev_first = true;
ret = drm_panel_of_backlight(&panel->base);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index 11d460d2ea19..ee86ff20c1bd 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -318,9 +318,11 @@ static int td028ttec1_probe(struct spi_device *spi)
struct td028ttec1_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (!lcd)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct td028ttec1_panel, panel,
+ &td028ttec1_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -334,9 +336,6 @@ static int td028ttec1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &td028ttec1_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&lcd->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index cf4609bb9b1d..b18af526b54c 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -421,9 +421,10 @@ static int td043mtea1_probe(struct spi_device *spi)
struct td043mtea1_panel *lcd;
int ret;
- lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
- if (lcd == NULL)
- return -ENOMEM;
+ lcd = devm_drm_panel_alloc(&spi->dev, struct td043mtea1_panel, panel,
+ &td043mtea1_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(lcd))
+ return PTR_ERR(lcd);
spi_set_drvdata(spi, lcd);
lcd->spi = spi;
@@ -455,9 +456,6 @@ static int td043mtea1_probe(struct spi_device *spi)
return ret;
}
- drm_panel_init(&lcd->panel, &lcd->spi->dev, &td043mtea1_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
drm_panel_add(&lcd->panel);
return 0;
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index f6a212e542cb..0beba5c08956 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -405,9 +405,11 @@ static int tpg110_probe(struct spi_device *spi)
struct tpg110 *tpg;
int ret;
- tpg = devm_kzalloc(dev, sizeof(*tpg), GFP_KERNEL);
- if (!tpg)
- return -ENOMEM;
+ tpg = devm_drm_panel_alloc(dev, struct tpg110, panel,
+ &tpg110_drm_funcs, DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(tpg))
+ return PTR_ERR(tpg);
+
tpg->dev = dev;
/* We get the physical display dimensions from the DT */
@@ -438,9 +440,6 @@ static int tpg110_probe(struct spi_device *spi)
if (ret)
return ret;
- drm_panel_init(&tpg->panel, dev, &tpg110_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&tpg->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panel/panel-visionox-r66451.c b/drivers/gpu/drm/panel/panel-visionox-r66451.c
index 3ea0a86f6e69..690cccedd438 100644
--- a/drivers/gpu/drm/panel/panel-visionox-r66451.c
+++ b/drivers/gpu/drm/panel/panel-visionox-r66451.c
@@ -255,9 +255,11 @@ static int visionox_r66451_probe(struct mipi_dsi_device *dsi)
struct drm_dsc_config *dsc;
int ret = 0;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_r66451, panel,
+ &visionox_r66451_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
dsc = devm_kzalloc(dev, sizeof(*dsc), GFP_KERNEL);
if (!dsc)
@@ -297,7 +299,6 @@ static int visionox_r66451_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_CLOCK_NON_CONTINUOUS;
ctx->panel.prepare_prev_first = true;
- drm_panel_init(&ctx->panel, dev, &visionox_r66451_funcs, DRM_MODE_CONNECTOR_DSI);
ctx->panel.backlight = visionox_r66451_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index be3a9797fbce..909c280eab1f 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -5,6 +5,7 @@
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/mod_devicetable.h>
#include <linux/gpio/consumer.h>
#include <linux/regulator/consumer.h>
@@ -15,11 +16,138 @@
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+struct visionox_rm69299_panel_desc {
+ const struct drm_display_mode *mode;
+ const u8 *init_seq;
+ unsigned int init_seq_len;
+};
+
struct visionox_rm69299 {
struct drm_panel panel;
- struct regulator_bulk_data supplies[2];
+ struct regulator_bulk_data *supplies;
struct gpio_desc *reset_gpio;
struct mipi_dsi_device *dsi;
+ const struct visionox_rm69299_panel_desc *desc;
+};
+
+static const struct regulator_bulk_data visionox_rm69299_supplies[] = {
+ { .supply = "vdda", .init_load_uA = 32000 },
+ { .supply = "vdd3p3", .init_load_uA = 13200 },
+};
+
+static const u8 visionox_rm69299_1080x2248_60hz_init_seq[][2] = {
+ { 0xfe, 0x00 }, { 0xc2, 0x08 }, { 0x35, 0x00 }, { 0x51, 0xff },
+};
+
+static const u8 visionox_rm69299_1080x2160_60hz_init_seq[][2] = {
+ { 0xfe, 0x40 }, { 0x05, 0x04 }, { 0x06, 0x08 }, { 0x08, 0x04 },
+ { 0x09, 0x08 }, { 0x0a, 0x07 }, { 0x0b, 0xcc }, { 0x0c, 0x07 },
+ { 0x0d, 0x90 }, { 0x0f, 0x87 }, { 0x20, 0x8d }, { 0x21, 0x8d },
+ { 0x24, 0x05 }, { 0x26, 0x05 }, { 0x28, 0x05 }, { 0x2a, 0x05 },
+ { 0x2d, 0x28 }, { 0x2f, 0x28 }, { 0x30, 0x32 }, { 0x31, 0x32 },
+ { 0x37, 0x80 }, { 0x38, 0x30 }, { 0x39, 0xa8 }, { 0x46, 0x48 },
+ { 0x47, 0x48 }, { 0x6b, 0x10 }, { 0x6f, 0x02 }, { 0x74, 0x2b },
+ { 0x80, 0x1a }, { 0xfe, 0x40 }, { 0x93, 0x10 }, { 0x16, 0x00 },
+ { 0x85, 0x07 }, { 0x84, 0x01 }, { 0x86, 0x0f }, { 0x87, 0x05 },
+ { 0x8c, 0x00 }, { 0x88, 0x2e }, { 0x89, 0x2e }, { 0x8b, 0x09 },
+ { 0x95, 0x00 }, { 0x91, 0x00 }, { 0x90, 0x00 }, { 0x8d, 0xd0 },
+ { 0x8a, 0x03 }, { 0xfe, 0xa0 }, { 0x13, 0x00 }, { 0x33, 0x00 },
+ { 0x0b, 0x33 }, { 0x36, 0x1e }, { 0x31, 0x88 }, { 0x32, 0x88 },
+ { 0x37, 0xf1 }, { 0xfe, 0x50 }, { 0x00, 0x00 }, { 0x01, 0x00 },
+ { 0x02, 0x00 }, { 0x03, 0xe9 }, { 0x04, 0x00 }, { 0x05, 0xf6 },
+ { 0x06, 0x01 }, { 0x07, 0x2c }, { 0x08, 0x01 }, { 0x09, 0x62 },
+ { 0x0a, 0x01 }, { 0x0b, 0x98 }, { 0x0c, 0x01 }, { 0x0d, 0xbf },
+ { 0x0e, 0x01 }, { 0x0f, 0xf6 }, { 0x10, 0x02 }, { 0x11, 0x24 },
+ { 0x12, 0x02 }, { 0x13, 0x4e }, { 0x14, 0x02 }, { 0x15, 0x70 },
+ { 0x16, 0x02 }, { 0x17, 0xaf }, { 0x18, 0x02 }, { 0x19, 0xe2 },
+ { 0x1a, 0x03 }, { 0x1b, 0x1f }, { 0x1c, 0x03 }, { 0x1d, 0x52 },
+ { 0x1e, 0x03 }, { 0x1f, 0x82 }, { 0x20, 0x03 }, { 0x21, 0xb6 },
+ { 0x22, 0x03 }, { 0x23, 0xf0 }, { 0x24, 0x04 }, { 0x25, 0x1f },
+ { 0x26, 0x04 }, { 0x27, 0x37 }, { 0x28, 0x04 }, { 0x29, 0x59 },
+ { 0x2a, 0x04 }, { 0x2b, 0x68 }, { 0x30, 0x04 }, { 0x31, 0x85 },
+ { 0x32, 0x04 }, { 0x33, 0xa2 }, { 0x34, 0x04 }, { 0x35, 0xbc },
+ { 0x36, 0x04 }, { 0x37, 0xd8 }, { 0x38, 0x04 }, { 0x39, 0xf4 },
+ { 0x3a, 0x05 }, { 0x3b, 0x0e }, { 0x40, 0x05 }, { 0x41, 0x13 },
+ { 0x42, 0x05 }, { 0x43, 0x1f }, { 0x44, 0x05 }, { 0x45, 0x1f },
+ { 0x46, 0x00 }, { 0x47, 0x00 }, { 0x48, 0x01 }, { 0x49, 0x43 },
+ { 0x4a, 0x01 }, { 0x4b, 0x4c }, { 0x4c, 0x01 }, { 0x4d, 0x6f },
+ { 0x4e, 0x01 }, { 0x4f, 0x92 }, { 0x50, 0x01 }, { 0x51, 0xb5 },
+ { 0x52, 0x01 }, { 0x53, 0xd4 }, { 0x58, 0x02 }, { 0x59, 0x06 },
+ { 0x5a, 0x02 }, { 0x5b, 0x33 }, { 0x5c, 0x02 }, { 0x5d, 0x59 },
+ { 0x5e, 0x02 }, { 0x5f, 0x7d }, { 0x60, 0x02 }, { 0x61, 0xbd },
+ { 0x62, 0x02 }, { 0x63, 0xf7 }, { 0x64, 0x03 }, { 0x65, 0x31 },
+ { 0x66, 0x03 }, { 0x67, 0x63 }, { 0x68, 0x03 }, { 0x69, 0x9d },
+ { 0x6a, 0x03 }, { 0x6b, 0xd2 }, { 0x6c, 0x04 }, { 0x6d, 0x05 },
+ { 0x6e, 0x04 }, { 0x6f, 0x38 }, { 0x70, 0x04 }, { 0x71, 0x51 },
+ { 0x72, 0x04 }, { 0x73, 0x70 }, { 0x74, 0x04 }, { 0x75, 0x85 },
+ { 0x76, 0x04 }, { 0x77, 0xa1 }, { 0x78, 0x04 }, { 0x79, 0xc0 },
+ { 0x7a, 0x04 }, { 0x7b, 0xd8 }, { 0x7c, 0x04 }, { 0x7d, 0xf2 },
+ { 0x7e, 0x05 }, { 0x7f, 0x10 }, { 0x80, 0x05 }, { 0x81, 0x21 },
+ { 0x82, 0x05 }, { 0x83, 0x2e }, { 0x84, 0x05 }, { 0x85, 0x3a },
+ { 0x86, 0x05 }, { 0x87, 0x3e }, { 0x88, 0x00 }, { 0x89, 0x00 },
+ { 0x8a, 0x01 }, { 0x8b, 0x86 }, { 0x8c, 0x01 }, { 0x8d, 0x8f },
+ { 0x8e, 0x01 }, { 0x8f, 0xb3 }, { 0x90, 0x01 }, { 0x91, 0xd7 },
+ { 0x92, 0x01 }, { 0x93, 0xfb }, { 0x94, 0x02 }, { 0x95, 0x18 },
+ { 0x96, 0x02 }, { 0x97, 0x4f }, { 0x98, 0x02 }, { 0x99, 0x7e },
+ { 0x9a, 0x02 }, { 0x9b, 0xa6 }, { 0x9c, 0x02 }, { 0x9d, 0xcf },
+ { 0x9e, 0x03 }, { 0x9f, 0x14 }, { 0xa4, 0x03 }, { 0xa5, 0x52 },
+ { 0xa6, 0x03 }, { 0xa7, 0x93 }, { 0xac, 0x03 }, { 0xad, 0xcf },
+ { 0xae, 0x04 }, { 0xaf, 0x08 }, { 0xb0, 0x04 }, { 0xb1, 0x42 },
+ { 0xb2, 0x04 }, { 0xb3, 0x7f }, { 0xb4, 0x04 }, { 0xb5, 0xb4 },
+ { 0xb6, 0x04 }, { 0xb7, 0xcc }, { 0xb8, 0x04 }, { 0xb9, 0xf2 },
+ { 0xba, 0x05 }, { 0xbb, 0x0c }, { 0xbc, 0x05 }, { 0xbd, 0x26 },
+ { 0xbe, 0x05 }, { 0xbf, 0x4b }, { 0xc0, 0x05 }, { 0xc1, 0x64 },
+ { 0xc2, 0x05 }, { 0xc3, 0x83 }, { 0xc4, 0x05 }, { 0xc5, 0xa1 },
+ { 0xc6, 0x05 }, { 0xc7, 0xba }, { 0xc8, 0x05 }, { 0xc9, 0xc4 },
+ { 0xca, 0x05 }, { 0xcb, 0xd5 }, { 0xcc, 0x05 }, { 0xcd, 0xd5 },
+ { 0xce, 0x00 }, { 0xcf, 0xce }, { 0xd0, 0x00 }, { 0xd1, 0xdb },
+ { 0xd2, 0x01 }, { 0xd3, 0x32 }, { 0xd4, 0x01 }, { 0xd5, 0x3b },
+ { 0xd6, 0x01 }, { 0xd7, 0x74 }, { 0xd8, 0x01 }, { 0xd9, 0x7d },
+ { 0xfe, 0x60 }, { 0x00, 0xcc }, { 0x01, 0x0f }, { 0x02, 0xff },
+ { 0x03, 0x01 }, { 0x04, 0x00 }, { 0x05, 0x02 }, { 0x06, 0x00 },
+ { 0x07, 0x00 }, { 0x09, 0xc4 }, { 0x0a, 0x00 }, { 0x0b, 0x04 },
+ { 0x0c, 0x01 }, { 0x0d, 0x00 }, { 0x0e, 0x04 }, { 0x0f, 0x00 },
+ { 0x10, 0x71 }, { 0x12, 0xc4 }, { 0x13, 0x00 }, { 0x14, 0x04 },
+ { 0x15, 0x01 }, { 0x16, 0x00 }, { 0x17, 0x06 }, { 0x18, 0x00 },
+ { 0x19, 0x71 }, { 0x1b, 0xc4 }, { 0x1c, 0x00 }, { 0x1d, 0x02 },
+ { 0x1e, 0x00 }, { 0x1f, 0x00 }, { 0x20, 0x08 }, { 0x21, 0x66 },
+ { 0x22, 0xb4 }, { 0x24, 0xc4 }, { 0x25, 0x00 }, { 0x26, 0x02 },
+ { 0x27, 0x00 }, { 0x28, 0x00 }, { 0x29, 0x07 }, { 0x2a, 0x66 },
+ { 0x2b, 0xb4 }, { 0x2f, 0xc4 }, { 0x30, 0x00 }, { 0x31, 0x04 },
+ { 0x32, 0x01 }, { 0x33, 0x00 }, { 0x34, 0x03 }, { 0x35, 0x00 },
+ { 0x36, 0x71 }, { 0x38, 0xc4 }, { 0x39, 0x00 }, { 0x3a, 0x04 },
+ { 0x3b, 0x01 }, { 0x3d, 0x00 }, { 0x3f, 0x05 }, { 0x40, 0x00 },
+ { 0x41, 0x71 }, { 0x83, 0xce }, { 0x84, 0x02 }, { 0x85, 0x20 },
+ { 0x86, 0xdc }, { 0x87, 0x00 }, { 0x88, 0x04 }, { 0x89, 0x00 },
+ { 0x8a, 0xbb }, { 0x8b, 0x80 }, { 0xc7, 0x0e }, { 0xc8, 0x05 },
+ { 0xc9, 0x1f }, { 0xca, 0x06 }, { 0xcb, 0x00 }, { 0xcc, 0x03 },
+ { 0xcd, 0x04 }, { 0xce, 0x1f }, { 0xcf, 0x1f }, { 0xd0, 0x1f },
+ { 0xd1, 0x1f }, { 0xd2, 0x1f }, { 0xd3, 0x1f }, { 0xd4, 0x1f },
+ { 0xd5, 0x1f }, { 0xd6, 0x1f }, { 0xd7, 0x17 }, { 0xd8, 0x1f },
+ { 0xd9, 0x16 }, { 0xda, 0x1f }, { 0xdb, 0x0e }, { 0xdc, 0x01 },
+ { 0xdd, 0x1f }, { 0xde, 0x02 }, { 0xdf, 0x00 }, { 0xe0, 0x03 },
+ { 0xe1, 0x04 }, { 0xe2, 0x1f }, { 0xe3, 0x1f }, { 0xe4, 0x1f },
+ { 0xe5, 0x1f }, { 0xe6, 0x1f }, { 0xe7, 0x1f }, { 0xe8, 0x1f },
+ { 0xe9, 0x1f }, { 0xea, 0x1f }, { 0xeb, 0x17 }, { 0xec, 0x1f },
+ { 0xed, 0x16 }, { 0xee, 0x1f }, { 0xef, 0x03 }, { 0xfe, 0x70 },
+ { 0x5a, 0x0b }, { 0x5b, 0x0b }, { 0x5c, 0x55 }, { 0x5d, 0x24 },
+ { 0xfe, 0x90 }, { 0x12, 0x24 }, { 0x13, 0x49 }, { 0x14, 0x92 },
+ { 0x15, 0x86 }, { 0x16, 0x61 }, { 0x17, 0x18 }, { 0x18, 0x24 },
+ { 0x19, 0x49 }, { 0x1a, 0x92 }, { 0x1b, 0x86 }, { 0x1c, 0x61 },
+ { 0x1d, 0x18 }, { 0x1e, 0x24 }, { 0x1f, 0x49 }, { 0x20, 0x92 },
+ { 0x21, 0x86 }, { 0x22, 0x61 }, { 0x23, 0x18 }, { 0xfe, 0x40 },
+ { 0x0e, 0x10 }, { 0xfe, 0xa0 }, { 0x04, 0x80 }, { 0x16, 0x00 },
+ { 0x26, 0x10 }, { 0x2f, 0x37 }, { 0xfe, 0xd0 }, { 0x06, 0x0f },
+ { 0x4b, 0x00 }, { 0x56, 0x4a }, { 0xfe, 0x00 }, { 0xc2, 0x09 },
+ { 0x35, 0x00 }, { 0xfe, 0x70 }, { 0x7d, 0x61 }, { 0x7f, 0x00 },
+ { 0x7e, 0x4e }, { 0x52, 0x2c }, { 0x49, 0x00 }, { 0x4a, 0x00 },
+ { 0x4b, 0x00 }, { 0x4c, 0x00 }, { 0x4d, 0xe8 }, { 0x4e, 0x25 },
+ { 0x4f, 0x6e }, { 0x50, 0xae }, { 0x51, 0x2f }, { 0xad, 0xf4 },
+ { 0xae, 0x8f }, { 0xaf, 0x00 }, { 0xb0, 0x54 }, { 0xb1, 0x3a },
+ { 0xb2, 0x00 }, { 0xb3, 0x00 }, { 0xb4, 0x00 }, { 0xb5, 0x00 },
+ { 0xb6, 0x18 }, { 0xb7, 0x30 }, { 0xb8, 0x4a }, { 0xb9, 0x98 },
+ { 0xba, 0x30 }, { 0xbb, 0x60 }, { 0xbc, 0x50 }, { 0xbd, 0x00 },
+ { 0xbe, 0x00 }, { 0xbf, 0x39 }, { 0xfe, 0x00 }, { 0x51, 0x66 },
};
static inline struct visionox_rm69299 *panel_to_ctx(struct drm_panel *panel)
@@ -31,7 +159,8 @@ static int visionox_rm69299_power_on(struct visionox_rm69299 *ctx)
{
int ret;
- ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ ret = regulator_bulk_enable(ARRAY_SIZE(visionox_rm69299_supplies),
+ ctx->supplies);
if (ret < 0)
return ret;
@@ -54,37 +183,32 @@ static int visionox_rm69299_power_off(struct visionox_rm69299 *ctx)
{
gpiod_set_value(ctx->reset_gpio, 0);
- return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ return regulator_bulk_disable(ARRAY_SIZE(visionox_rm69299_supplies),
+ ctx->supplies);
}
static int visionox_rm69299_unprepare(struct drm_panel *panel)
{
struct visionox_rm69299 *ctx = panel_to_ctx(panel);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
ctx->dsi->mode_flags = 0;
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
- if (ret < 0)
- dev_err(ctx->panel.dev, "set_display_off cmd failed ret = %d\n", ret);
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
/* 120ms delay required here as per DCS spec */
- msleep(120);
-
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "enter_sleep cmd failed ret = %d\n", ret);
- }
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = visionox_rm69299_power_off(ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
- return ret;
+ return visionox_rm69299_power_off(ctx);
}
static int visionox_rm69299_prepare(struct drm_panel *panel)
{
struct visionox_rm69299 *ctx = panel_to_ctx(panel);
- int ret;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi };
+ int ret, i;
ret = visionox_rm69299_power_on(ctx);
if (ret < 0)
@@ -92,52 +216,20 @@ static int visionox_rm69299_prepare(struct drm_panel *panel)
ctx->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xfe, 0x00 }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 0 failed, ret = %d\n", ret);
- goto power_off;
- }
-
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xc2, 0x08 }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 1 failed, ret = %d\n", ret);
- goto power_off;
- }
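+ /* init_seq is a flat array of 2-byte (register, value) writes */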
+ for (i = 0; i < ctx->desc->init_seq_len; i++)
+ mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, &ctx->desc->init_seq[i * 2], 2);
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x35, 0x00 }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 2 failed, ret = %d\n", ret);
- goto power_off;
- }
-
- ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x51, 0xff }, 2);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "cmd set tx 3 failed, ret = %d\n", ret);
- goto power_off;
- }
-
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "exit_sleep_mode cmd failed ret = %d\n", ret);
- goto power_off;
- }
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
/* Per DSI spec wait 120ms after sending exit sleep DCS command */
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
- if (ret < 0) {
- dev_err(ctx->panel.dev, "set_display_on cmd failed ret = %d\n", ret);
- goto power_off;
- }
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
/* Per DSI spec wait 120ms after sending set_display_on DCS command */
- msleep(120);
+ mipi_dsi_msleep(&dsi_ctx, 120);
- return 0;
-
-power_off:
- return ret;
+ return dsi_ctx.accum_err;
}
static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
@@ -154,14 +246,26 @@ static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
.flags = 0,
};
+static const struct drm_display_mode visionox_rm69299_1080x2160_60hz = {
+ .clock = 158695,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 26,
+ .hsync_end = 1080 + 26 + 2,
+ .htotal = 1080 + 26 + 2 + 36,
+ .vdisplay = 2160,
+ .vsync_start = 2160 + 8,
+ .vsync_end = 2160 + 8 + 4,
+ .vtotal = 2160 + 8 + 4 + 4,
+ .flags = 0,
+};
+
static int visionox_rm69299_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct visionox_rm69299 *ctx = panel_to_ctx(panel);
struct drm_display_mode *mode;
- mode = drm_mode_duplicate(connector->dev,
- &visionox_rm69299_1080x2248_60hz);
+ mode = drm_mode_duplicate(connector->dev, ctx->desc->mode);
if (!mode) {
dev_err(ctx->panel.dev, "failed to create a new display mode\n");
return 0;
@@ -187,20 +291,22 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
struct visionox_rm69299 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_rm69299, panel,
+ &visionox_rm69299_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ctx->desc = device_get_match_data(dev);
+ if (!ctx->desc)
+ return -EINVAL;
mipi_dsi_set_drvdata(dsi, ctx);
ctx->dsi = dsi;
- ctx->supplies[0].supply = "vdda";
- ctx->supplies[0].init_load_uA = 32000;
- ctx->supplies[1].supply = "vdd3p3";
- ctx->supplies[1].init_load_uA = 13200;
-
- ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(visionox_rm69299_supplies),
+ visionox_rm69299_supplies, &ctx->supplies);
if (ret < 0)
return ret;
@@ -210,8 +316,6 @@ static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
return PTR_ERR(ctx->reset_gpio);
}
- drm_panel_init(&ctx->panel, dev, &visionox_rm69299_drm_funcs,
- DRM_MODE_CONNECTOR_DSI);
drm_panel_add(&ctx->panel);
dsi->lanes = 4;
@@ -239,8 +343,23 @@ static void visionox_rm69299_remove(struct mipi_dsi_device *dsi)
drm_panel_remove(&ctx->panel);
}
+const struct visionox_rm69299_panel_desc visionox_rm69299_1080p_display_desc = {
+ .mode = &visionox_rm69299_1080x2248_60hz,
+ .init_seq = (const u8 *)visionox_rm69299_1080x2248_60hz_init_seq,
+ .init_seq_len = ARRAY_SIZE(visionox_rm69299_1080x2248_60hz_init_seq),
+};
+
+const struct visionox_rm69299_panel_desc visionox_rm69299_shift_desc = {
+ .mode = &visionox_rm69299_1080x2160_60hz,
+ .init_seq = (const u8 *)visionox_rm69299_1080x2160_60hz_init_seq,
+ .init_seq_len = ARRAY_SIZE(visionox_rm69299_1080x2160_60hz_init_seq),
+};
+
static const struct of_device_id visionox_rm69299_of_match[] = {
- { .compatible = "visionox,rm69299-1080p-display", },
+ { .compatible = "visionox,rm69299-1080p-display",
+ .data = &visionox_rm69299_1080p_display_desc },
+ { .compatible = "visionox,rm69299-shift",
+ .data = &visionox_rm69299_shift_desc },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, visionox_rm69299_of_match);
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm692e5.c b/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
index 4db7fa8d74c4..e53645d59413 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm692e5.c
@@ -360,9 +360,11 @@ static int visionox_rm692e5_probe(struct mipi_dsi_device *dsi)
struct visionox_rm692e5 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_rm692e5, panel,
+ &visionox_rm692e5_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(visionox_rm692e5_supplies),
@@ -383,8 +385,6 @@ static int visionox_rm692e5_probe(struct mipi_dsi_device *dsi)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
- drm_panel_init(&ctx->panel, dev, &visionox_rm692e5_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
ctx->panel.prepare_prev_first = true;
ctx->panel.backlight = visionox_rm692e5_create_backlight(dsi);
diff --git a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
index 17b8defe79c1..97a79411e1ec 100644
--- a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
+++ b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
@@ -248,9 +248,11 @@ static int visionox_vtdr6130_probe(struct mipi_dsi_device *dsi)
struct visionox_vtdr6130 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct visionox_vtdr6130, panel,
+ &visionox_vtdr6130_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ret = devm_regulator_bulk_get_const(&dsi->dev,
ARRAY_SIZE(visionox_vtdr6130_supplies),
@@ -273,9 +275,6 @@ static int visionox_vtdr6130_probe(struct mipi_dsi_device *dsi)
MIPI_DSI_CLOCK_NON_CONTINUOUS;
ctx->panel.prepare_prev_first = true;
- drm_panel_init(&ctx->panel, dev, &visionox_vtdr6130_panel_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ctx->panel.backlight = visionox_vtdr6130_create_backlight(dsi);
if (IS_ERR(ctx->panel.backlight))
return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
diff --git a/drivers/gpu/drm/panel/panel-widechips-ws2401.c b/drivers/gpu/drm/panel/panel-widechips-ws2401.c
index 2591ff8f0d4e..dd74610bd2eb 100644
--- a/drivers/gpu/drm/panel/panel-widechips-ws2401.c
+++ b/drivers/gpu/drm/panel/panel-widechips-ws2401.c
@@ -347,9 +347,11 @@ static int ws2401_probe(struct spi_device *spi)
struct ws2401 *ws;
int ret;
- ws = devm_kzalloc(dev, sizeof(*ws), GFP_KERNEL);
- if (!ws)
- return -ENOMEM;
+ ws = devm_drm_panel_alloc(dev, struct ws2401, panel, &ws2401_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(ws))
+ return PTR_ERR(ws);
+
ws->dev = dev;
/*
@@ -379,9 +381,6 @@ static int ws2401_probe(struct spi_device *spi)
ws2401_read_mtp_id(ws);
ws2401_power_off(ws);
- drm_panel_init(&ws->panel, dev, &ws2401_drm_funcs,
- DRM_MODE_CONNECTOR_DPI);
-
ret = drm_panel_of_backlight(&ws->panel);
if (ret)
return dev_err_probe(dev, ret,
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
index 2b91414c2829..fc6516373b5d 100644
--- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -241,9 +241,10 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi)
struct xpp055c272 *ctx;
int ret;
- ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = devm_drm_panel_alloc(dev, struct xpp055c272, panel,
+ &xpp055c272_funcs, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(ctx->reset_gpio))
@@ -269,9 +270,6 @@ static int xpp055c272_probe(struct mipi_dsi_device *dsi)
dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
- drm_panel_init(&ctx->panel, &dsi->dev, &xpp055c272_funcs,
- DRM_MODE_CONNECTOR_DSI);
-
ret = drm_panel_of_backlight(&ctx->panel);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 3385fd3ef41a..5d0dce10336b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -29,7 +29,7 @@ static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfr
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
- struct panfrost_device *ptdev = dev_get_drvdata(dev);
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
struct dev_pm_opp *opp;
int err;
@@ -40,7 +40,7 @@ static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
err = dev_pm_opp_set_rate(dev, *freq);
if (!err)
- ptdev->pfdevfreq.current_frequency = *freq;
+ pfdev->pfdevfreq.current_frequency = *freq;
return err;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 5d35076b2e6d..04bec27449cb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -209,6 +209,11 @@ int panfrost_device_init(struct panfrost_device *pfdev)
spin_lock_init(&pfdev->cycle_counter.lock);
+#ifdef CONFIG_DEBUG_FS
+ mutex_init(&pfdev->debugfs.gems_lock);
+ INIT_LIST_HEAD(&pfdev->debugfs.gems_list);
+#endif
+
err = panfrost_pm_domain_init(pfdev);
if (err)
return err;
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index dcff70f905cd..077525a3ad68 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -111,6 +111,17 @@ struct panfrost_compatible {
u8 gpu_quirks;
};
+/**
+ * struct panfrost_device_debugfs - Device-wide DebugFS tracking structures
+ */
+struct panfrost_device_debugfs {
+ /** @gems_list: Device-wide list of GEM objects owned by at least one file. */
+ struct list_head gems_list;
+
+ /** @gems_lock: Serializes access to the device-wide list of GEM objects. */
+ struct mutex gems_lock;
+};
+
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
@@ -164,6 +175,10 @@ struct panfrost_device {
atomic_t use_count;
spinlock_t lock;
} cycle_counter;
+
+#ifdef CONFIG_DEBUG_FS
+ struct panfrost_device_debugfs debugfs;
+#endif
};
struct panfrost_mmu {
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index f1ec3b02f15a..842239b43cdf 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -13,6 +13,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <drm/panfrost_drm.h>
+#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_syncobj.h>
@@ -312,7 +313,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
ret = drm_sched_job_init(&job->base,
&file_priv->sched_entity[slot],
- 1, NULL);
+ 1, NULL, file->client_id);
if (ret)
goto out_put_job;
@@ -495,6 +496,46 @@ out_put_object:
return ret;
}
+static int panfrost_ioctl_set_label_bo(struct drm_device *ddev, void *data,
+ struct drm_file *file)
+{
+ struct drm_panfrost_set_label_bo *args = data;
+ struct drm_gem_object *obj;
+ const char *label = NULL;
+ int ret = 0;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (args->label) {
+ label = strndup_user(u64_to_user_ptr(args->label),
+ PANFROST_BO_LABEL_MAXLEN);
+ if (IS_ERR(label)) {
+ ret = PTR_ERR(label);
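+ /*
+ * strndup_user() returns -EINVAL when the string exceeds
+ * PANFROST_BO_LABEL_MAXLEN; report that to userspace as -E2BIG.
+ */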
+ if (ret == -EINVAL)
+ ret = -E2BIG;
+ goto err_put_obj;
+ }
+ }
+
+ /*
+ * We treat passing a label of length 0 and passing a NULL label
+ * differently, because even though they might seem conceptually
+ * similar, future uses of the BO label might expect a different
+ * behaviour in each case.
+ */
+ panfrost_gem_set_label(obj, label);
+
+err_put_obj:
+ drm_gem_object_put(obj);
+
+ return ret;
+}
+
int panfrost_unstable_ioctl_check(void)
{
if (!unstable_ioctls)
@@ -561,6 +602,7 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW),
PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW),
PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(SET_LABEL_BO, set_label_bo, DRM_RENDER_ALLOW),
};
static void panfrost_gpu_show_fdinfo(struct panfrost_device *pfdev,
@@ -618,6 +660,37 @@ static const struct file_operations panfrost_drm_driver_fops = {
.show_fdinfo = drm_show_fdinfo,
};
+#ifdef CONFIG_DEBUG_FS
+static int panthor_gems_show(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct panfrost_device *pfdev = dev->dev_private;
+
+ panfrost_gem_debugfs_print_bos(pfdev, m);
+
+ return 0;
+}
+
+static struct drm_info_list panthor_debugfs_list[] = {
+ {"gems", panthor_gems_show, 0, NULL},
+};
+
+static int panthor_gems_debugfs_init(struct drm_minor *minor)
+{
+ drm_debugfs_create_files(panthor_debugfs_list,
+ ARRAY_SIZE(panthor_debugfs_list),
+ minor->debugfs_root, minor);
+
+ return 0;
+}
+
+static void panfrost_debugfs_init(struct drm_minor *minor)
+{
+ panthor_gems_debugfs_init(minor);
+}
+#endif
+
/*
* Panfrost driver version:
* - 1.0 - initial interface
@@ -625,6 +698,7 @@ static const struct file_operations panfrost_drm_driver_fops = {
* - 1.2 - adds AFBC_FEATURES query
* - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT
* - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries
+ * - 1.4 - adds SET_LABEL_BO
*/
static const struct drm_driver panfrost_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
@@ -637,10 +711,13 @@ static const struct drm_driver panfrost_drm_driver = {
.name = "panfrost",
.desc = "panfrost DRM",
.major = 1,
- .minor = 3,
+ .minor = 4,
.gem_create_object = panfrost_gem_create_object,
.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = panfrost_debugfs_init,
+#endif
};
static int panfrost_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 963f04ba2de6..bb73f2a68a12 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
@@ -11,6 +12,36 @@
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
+#ifdef CONFIG_DEBUG_FS
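+/*
+ * Record the creating process (thread-group leader comm and tgid) and link
+ * the BO into the device-wide list backing the gems debugfs file.
+ */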
+static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
+ struct panfrost_gem_object *bo)
+{
+ bo->debugfs.creator.tgid = current->group_leader->pid;
+ get_task_comm(bo->debugfs.creator.process_name, current->group_leader);
+
+ mutex_lock(&pfdev->debugfs.gems_lock);
+ list_add_tail(&bo->debugfs.node, &pfdev->debugfs.gems_list);
+ mutex_unlock(&pfdev->debugfs.gems_lock);
+}
+
+static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo)
+{
+ struct panfrost_device *pfdev = bo->base.base.dev->dev_private;
+
+ if (list_empty(&bo->debugfs.node))
+ return;
+
+ mutex_lock(&pfdev->debugfs.gems_lock);
+ list_del_init(&bo->debugfs.node);
+ mutex_unlock(&pfdev->debugfs.gems_lock);
+}
+#else
+static void panfrost_gem_debugfs_bo_add(struct panfrost_device *pfdev,
+ struct panfrost_gem_object *bo)
+{}
+static void panfrost_gem_debugfs_bo_rm(struct panfrost_gem_object *bo) {}
+#endif
+
/* Called by the DRM core on the last userspace/kernel unreference of the
* BO.
*/
@@ -35,6 +66,10 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
*/
WARN_ON_ONCE(!list_empty(&bo->mappings.list));
+ kfree_const(bo->label.str);
+ panfrost_gem_debugfs_bo_rm(bo);
+ mutex_destroy(&bo->label.lock);
+
if (bo->sgts) {
int i;
int n_sgt = bo->base.base.size / SZ_2M;
@@ -260,6 +295,9 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
obj->base.map_wc = !pfdev->coherent;
+ mutex_init(&obj->label.lock);
+
+ panfrost_gem_debugfs_bo_add(pfdev, obj);
return &obj->base.base;
}
@@ -300,5 +338,153 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
bo = to_panfrost_bo(obj);
bo->noexec = true;
+ /*
+ * We assign this generic label because this function cannot
+ * be reached through any of the Panfrost UM driver-specific
+ * code paths, unless one is given by explicitly calling the
+ * SET_LABEL_BO ioctl. It is therefore preferable to have a
+ * blanket BO tag that tells us the object was imported from
+ * another driver rather than no label at all.
+ */
+ panfrost_gem_internal_set_label(obj, "GEM PRIME buffer");
+
return obj;
}
+
+void
+panfrost_gem_set_label(struct drm_gem_object *obj, const char *label)
+{
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ const char *old_label;
+
+ scoped_guard(mutex, &bo->label.lock) {
+ old_label = bo->label.str;
+ bo->label.str = label;
+ }
+
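+ /*
+ * The old label may come from kstrdup_const() or strndup_user();
+ * kfree_const() handles both heap and .rodata strings.
+ */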
+ kfree_const(old_label);
+}
+
+void
+panfrost_gem_internal_set_label(struct drm_gem_object *obj, const char *label)
+{
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ const char *str;
+
+ /* We should never attempt labelling a UM-exposed GEM object */
+ if (drm_WARN_ON(bo->base.base.dev, bo->base.base.handle_count > 0))
+ return;
+
+ if (!label)
+ return;
+
+ str = kstrdup_const(label, GFP_KERNEL);
+ if (!str) {
+ /* Failing to allocate memory for a label isn't a fatal condition */
+ drm_warn(bo->base.base.dev, "Not enough memory to allocate BO label");
+ return;
+ }
+
+ panfrost_gem_set_label(obj, str);
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct gem_size_totals {
+ size_t size;
+ size_t resident;
+ size_t reclaimable;
+};
+
+struct flag_def {
+ u32 flag;
+ const char *name;
+};
+
+static void panfrost_gem_debugfs_print_flag_names(struct seq_file *m)
+{
+ int len;
+ int i;
+
+ static const struct flag_def gem_state_flags_names[] = {
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED, "imported"},
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED, "exported"},
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED, "purged"},
+ {PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE, "purgeable"},
+ };
+
+ seq_puts(m, "GEM state flags: ");
+ for (i = 0, len = ARRAY_SIZE(gem_state_flags_names); i < len; i++) {
+ seq_printf(m, "%s (0x%x)%s", gem_state_flags_names[i].name,
+ gem_state_flags_names[i].flag, (i < len - 1) ? ", " : "\n\n");
+ }
+}
+
+static void panfrost_gem_debugfs_bo_print(struct panfrost_gem_object *bo,
+ struct seq_file *m,
+ struct gem_size_totals *totals)
+{
+ unsigned int refcount = kref_read(&bo->base.base.refcount);
+ char creator_info[32] = {};
+ size_t resident_size;
+ u32 gem_state_flags = 0;
+
+ /* Skip BOs being destroyed. */
+ if (!refcount)
+ return;
+
+ resident_size = bo->base.pages ? bo->base.base.size : 0;
+
+ snprintf(creator_info, sizeof(creator_info),
+ "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid);
+ seq_printf(m, "%-32s%-16d%-16d%-16zd%-16zd0x%-16lx",
+ creator_info,
+ bo->base.base.name,
+ refcount,
+ bo->base.base.size,
+ resident_size,
+ drm_vma_node_start(&bo->base.base.vma_node));
+
+ if (bo->base.base.import_attach)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED;
+ if (bo->base.base.dma_buf)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED;
+
+ if (bo->base.madv < 0)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED;
+ else if (bo->base.madv > 0)
+ gem_state_flags |= PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE;
+
+ seq_printf(m, "0x%-10x", gem_state_flags);
+
+ scoped_guard(mutex, &bo->label.lock) {
+ seq_printf(m, "%s\n", bo->label.str ? : "");
+ }
+
+ totals->size += bo->base.base.size;
+ totals->resident += resident_size;
+ if (bo->base.madv > 0)
+ totals->reclaimable += resident_size;
+}
+
+void panfrost_gem_debugfs_print_bos(struct panfrost_device *pfdev,
+ struct seq_file *m)
+{
+ struct gem_size_totals totals = {0};
+ struct panfrost_gem_object *bo;
+
+ panfrost_gem_debugfs_print_flag_names(m);
+
+ seq_puts(m, "created-by global-name refcount size resident-size file-offset state label\n");
+ seq_puts(m, "-----------------------------------------------------------------------------------------------------------------------------------\n");
+
+ scoped_guard(mutex, &pfdev->debugfs.gems_lock) {
+ list_for_each_entry(bo, &pfdev->debugfs.gems_list, debugfs.node) {
+ panfrost_gem_debugfs_bo_print(bo, m, &totals);
+ }
+ }
+
+ seq_puts(m, "===================================================================================================================================\n");
+ seq_printf(m, "Total size: %zd, Total resident: %zd, Total reclaimable: %zd\n",
+ totals.size, totals.resident, totals.reclaimable);
+}
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 7516b7ecf7fe..8de3e76f2717 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -8,6 +8,46 @@
#include <drm/drm_mm.h>
struct panfrost_mmu;
+struct panfrost_device;
+
+#define PANFROST_BO_LABEL_MAXLEN 4096
+
+enum panfrost_debugfs_gem_state_flags {
+ /** @PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED: GEM BO is PRIME imported. */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_IMPORTED = BIT(0),
+
+ /** @PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED: GEM BO is PRIME exported. */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_EXPORTED = BIT(1),
+
+ /** @PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED: GEM BO was reclaimed by the shrinker. */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGED = BIT(2),
+
+ /**
+ * @PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE: GEM BO pages were marked as no longer
+ * needed by UM and can be reclaimed by the shrinker.
+ */
+ PANFROST_DEBUGFS_GEM_STATE_FLAG_PURGEABLE = BIT(3),
+};
+
+/**
+ * struct panfrost_gem_debugfs - GEM object's DebugFS list information
+ */
+struct panfrost_gem_debugfs {
+ /**
+ * @node: Node used to insert the object in the device-wide list of
+ * GEM objects, to display information about it through a DebugFS file.
+ */
+ struct list_head node;
+
+ /** @creator: Information about the UM process which created the GEM. */
+ struct {
+ /** @creator.process_name: Group leader name in owning thread's process */
+ char process_name[TASK_COMM_LEN];
+
+ /** @creator.tgid: PID of the thread's group leader within its process */
+ pid_t tgid;
+ } creator;
+};
struct panfrost_gem_object {
struct drm_gem_shmem_object base;
@@ -41,8 +81,26 @@ struct panfrost_gem_object {
*/
size_t heap_rss_size;
+ /**
+ * @label: BO tagging fields. The label can be assigned within the
+ * driver itself or through a specific IOCTL.
+ */
+ struct {
+ /**
+ * @label.str: Pointer to a NULL-terminated string. May be NULL.
+ */
+ const char *str;
+
+ /** @label.lock: Protects access to the @label.str field. */
+ struct mutex lock;
+ } label;
+
bool noexec :1;
bool is_heap :1;
+
+#ifdef CONFIG_DEBUG_FS
+ struct panfrost_gem_debugfs debugfs;
+#endif
};
struct panfrost_gem_mapping {
@@ -89,4 +147,12 @@ void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo);
int panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
+void panfrost_gem_set_label(struct drm_gem_object *obj, const char *label);
+void panfrost_gem_internal_set_label(struct drm_gem_object *obj, const char *label);
+
+#ifdef CONFIG_DEBUG_FS
+void panfrost_gem_debugfs_print_bos(struct panfrost_device *pfdev,
+ struct seq_file *m);
+#endif
+
#endif /* __PANFROST_GEM_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 52befead08c6..563f16bae543 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -111,6 +111,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
goto err_put_mapping;
perfcnt->buf = map.vaddr;
+ panfrost_gem_internal_set_label(&bo->base, "Perfcnt sample buffer");
+
/*
* Invalidate the cache and clear the counters to start from a fresh
* state.
diff --git a/drivers/gpu/drm/panthor/panthor_device.h b/drivers/gpu/drm/panthor/panthor_device.h
index 465d3ab1b79e..4fc7cf2aeed5 100644
--- a/drivers/gpu/drm/panthor/panthor_device.h
+++ b/drivers/gpu/drm/panthor/panthor_device.h
@@ -230,6 +230,24 @@ struct panthor_file {
/** @ptdev: Device attached to this file. */
struct panthor_device *ptdev;
+ /** @user_mmio: User MMIO related fields. */
+ struct {
+ /**
+ * @offset: Offset used for user MMIO mappings.
+ *
+ * This offset should not be used to check the type of mapping
+ * except in panthor_mmap(). After that point, MMIO mapping
+ * offsets have been adjusted to match
+ * DRM_PANTHOR_USER_MMIO_OFFSET and that macro should be used
+ * instead.
+ * Make sure this rule is followed at all times, because
+ * userspace is in control of the offset, and can change the
+ * value behind our back. Otherwise it can lead to erroneous
+ * branching happening in kernel space.
+ */
+ u64 offset;
+ } user_mmio;
+
/** @vms: VM pool attached to this file. */
struct panthor_vm_pool *vms;
@@ -437,4 +455,75 @@ static int panthor_request_ ## __name ## _irq(struct panthor_device *ptdev, \
extern struct workqueue_struct *panthor_cleanup_wq;
+static inline void gpu_write(struct panthor_device *ptdev, u32 reg, u32 data)
+{
+ writel(data, ptdev->iomem + reg);
+}
+
+static inline u32 gpu_read(struct panthor_device *ptdev, u32 reg)
+{
+ return readl(ptdev->iomem + reg);
+}
+
+static inline u32 gpu_read_relaxed(struct panthor_device *ptdev, u32 reg)
+{
+ return readl_relaxed(ptdev->iomem + reg);
+}
+
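+/*
+ * 64-bit registers are split into two consecutive 32-bit words: the low word
+ * at @reg and the high word at @reg + 4.
+ */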
+static inline void gpu_write64(struct panthor_device *ptdev, u32 reg, u64 data)
+{
+ gpu_write(ptdev, reg, lower_32_bits(data));
+ gpu_write(ptdev, reg + 4, upper_32_bits(data));
+}
+
+static inline u64 gpu_read64(struct panthor_device *ptdev, u32 reg)
+{
+ return (gpu_read(ptdev, reg) | ((u64)gpu_read(ptdev, reg + 4) << 32));
+}
+
+static inline u64 gpu_read64_relaxed(struct panthor_device *ptdev, u32 reg)
+{
+ return (gpu_read_relaxed(ptdev, reg) |
+ ((u64)gpu_read_relaxed(ptdev, reg + 4) << 32));
+}
+
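+/*
+ * Read a 64-bit counter that the hardware may update between the two 32-bit
+ * accesses: re-read the high word until it is stable across the low-word
+ * read, so the combined value is tear-free.
+ */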
+static inline u64 gpu_read64_counter(struct panthor_device *ptdev, u32 reg)
+{
+ u32 lo, hi1, hi2;
+ do {
+ hi1 = gpu_read(ptdev, reg + 4);
+ lo = gpu_read(ptdev, reg);
+ hi2 = gpu_read(ptdev, reg + 4);
+ } while (hi1 != hi2);
+ return lo | ((u64)hi2 << 32);
+}
+
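+/*
+ * Poll-timeout wrappers around the gpu_read*() accessors, following
+ * read_poll_timeout() semantics: @val holds the last value read and the
+ * macros return 0 once @cond is met, or -ETIMEDOUT on timeout.
+ */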
+#define gpu_read_poll_timeout(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout(gpu_read, val, cond, delay_us, timeout_us, false, \
+ dev, reg)
+
+#define gpu_read_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout_atomic(gpu_read, val, cond, delay_us, timeout_us, \
+ false, dev, reg)
+
+#define gpu_read64_poll_timeout(dev, reg, val, cond, delay_us, timeout_us) \
+ read_poll_timeout(gpu_read64, val, cond, delay_us, timeout_us, false, \
+ dev, reg)
+
+#define gpu_read64_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout_atomic(gpu_read64, val, cond, delay_us, timeout_us, \
+ false, dev, reg)
+
+#define gpu_read_relaxed_poll_timeout_atomic(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout_atomic(gpu_read_relaxed, val, cond, delay_us, \
+ timeout_us, false, dev, reg)
+
+#define gpu_read64_relaxed_poll_timeout(dev, reg, val, cond, delay_us, \
+ timeout_us) \
+ read_poll_timeout(gpu_read64_relaxed, val, cond, delay_us, timeout_us, \
+ false, dev, reg)
+
#endif
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 6200cad22563..1116f2d2826e 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -772,8 +772,8 @@ static int panthor_query_timestamp_info(struct panthor_device *ptdev,
#else
arg->timestamp_frequency = 0;
#endif
- arg->current_timestamp = panthor_gpu_read_timestamp(ptdev);
- arg->timestamp_offset = panthor_gpu_read_timestamp_offset(ptdev);
+ arg->current_timestamp = gpu_read64_counter(ptdev, GPU_TIMESTAMP);
+ arg->timestamp_offset = gpu_read64(ptdev, GPU_TIMESTAMP_OFFSET);
pm_runtime_put(ptdev->base.dev);
return 0;
@@ -996,7 +996,8 @@ static int panthor_ioctl_group_submit(struct drm_device *ddev, void *data,
const struct drm_panthor_queue_submit *qsubmit = &jobs_args[i];
struct drm_sched_job *job;
- job = panthor_job_create(pfile, args->group_handle, qsubmit);
+ job = panthor_job_create(pfile, args->group_handle, qsubmit,
+ file->client_id);
if (IS_ERR(job)) {
ret = PTR_ERR(job);
goto out_cleanup_submit_ctx;
@@ -1378,6 +1379,20 @@ err_put_obj:
return ret;
}
+static int panthor_ioctl_set_user_mmio_offset(struct drm_device *ddev,
+ void *data, struct drm_file *file)
+{
+ struct drm_panthor_set_user_mmio_offset *args = data;
+ struct panthor_file *pfile = file->driver_priv;
+
+ if (args->offset != DRM_PANTHOR_USER_MMIO_OFFSET_32BIT &&
+ args->offset != DRM_PANTHOR_USER_MMIO_OFFSET_64BIT)
+ return -EINVAL;
+
+ WRITE_ONCE(pfile->user_mmio.offset, args->offset);
+ return 0;
+}
+
static int
panthor_open(struct drm_device *ddev, struct drm_file *file)
{
@@ -1395,6 +1410,18 @@ panthor_open(struct drm_device *ddev, struct drm_file *file)
}
pfile->ptdev = ptdev;
+ pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET;
+
+#ifdef CONFIG_ARM64
+ /*
+ * With 32-bit systems being limited by the 32-bit representation of
+ * mmap2's pgoffset field, we need to make the MMIO offset arch
+ * specific.
+ */
+ if (test_tsk_thread_flag(current, TIF_32BIT))
+ pfile->user_mmio.offset = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;
+#endif
+
ret = panthor_vm_pool_create(pfile);
if (ret)
@@ -1448,6 +1475,7 @@ static const struct drm_ioctl_desc panthor_drm_driver_ioctls[] = {
PANTHOR_IOCTL(TILER_HEAP_DESTROY, tiler_heap_destroy, DRM_RENDER_ALLOW),
PANTHOR_IOCTL(GROUP_SUBMIT, group_submit, DRM_RENDER_ALLOW),
PANTHOR_IOCTL(BO_SET_LABEL, bo_set_label, DRM_RENDER_ALLOW),
+ PANTHOR_IOCTL(SET_USER_MMIO_OFFSET, set_user_mmio_offset, DRM_RENDER_ALLOW),
};
static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -1456,30 +1484,26 @@ static int panthor_mmap(struct file *filp, struct vm_area_struct *vma)
struct panthor_file *pfile = file->driver_priv;
struct panthor_device *ptdev = pfile->ptdev;
u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
+ u64 user_mmio_offset;
int ret, cookie;
if (!drm_dev_enter(file->minor->dev, &cookie))
return -ENODEV;
-#ifdef CONFIG_ARM64
- /*
- * With 32-bit systems being limited by the 32-bit representation of
- * mmap2's pgoffset field, we need to make the MMIO offset arch
- * specific. This converts a user MMIO offset into something the kernel
- * driver understands.
+ /* Adjust the user MMIO offset to match the offset used on the kernel side.
+ * We use a local variable with a READ_ONCE() here to make sure
+ * the user_mmio_offset we use for the is_user_mmio_mapping() check
+ * hasn't changed when we do the offset adjustment.
*/
- if (test_tsk_thread_flag(current, TIF_32BIT) &&
- offset >= DRM_PANTHOR_USER_MMIO_OFFSET_32BIT) {
- offset += DRM_PANTHOR_USER_MMIO_OFFSET_64BIT -
- DRM_PANTHOR_USER_MMIO_OFFSET_32BIT;
+ user_mmio_offset = READ_ONCE(pfile->user_mmio.offset);
+ if (offset >= user_mmio_offset) {
+ offset -= user_mmio_offset;
+ offset += DRM_PANTHOR_USER_MMIO_OFFSET;
vma->vm_pgoff = offset >> PAGE_SHIFT;
- }
-#endif
-
- if (offset >= DRM_PANTHOR_USER_MMIO_OFFSET)
ret = panthor_device_mmap_io(ptdev, vma);
- else
+ } else {
ret = drm_gem_mmap(filp, vma);
+ }
drm_dev_exit(cookie);
return ret;
@@ -1583,6 +1607,7 @@ static void panthor_debugfs_init(struct drm_minor *minor)
* - adds PANTHOR_GROUP_PRIORITY_REALTIME priority
* - 1.3 - adds DRM_PANTHOR_GROUP_STATE_INNOCENT flag
* - 1.4 - adds DRM_IOCTL_PANTHOR_BO_SET_LABEL ioctl
+ * - 1.5 - adds DRM_PANTHOR_SET_USER_MMIO_OFFSET ioctl
*/
static const struct drm_driver panthor_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ |
@@ -1596,7 +1621,7 @@ static const struct drm_driver panthor_drm_driver = {
.name = "panthor",
.desc = "Panthor DRM driver",
.major = 1,
- .minor = 4,
+ .minor = 5,
.gem_create_object = panthor_gem_create_object,
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 7bc38e635329..36f1034839c2 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -1063,8 +1063,8 @@ static void panthor_fw_stop(struct panthor_device *ptdev)
u32 status;
gpu_write(ptdev, MCU_CONTROL, MCU_CONTROL_DISABLE);
- if (readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
- status == MCU_STATUS_DISABLED, 10, 100000))
+ if (gpu_read_poll_timeout(ptdev, MCU_STATUS, status,
+ status == MCU_STATUS_DISABLED, 10, 100000))
drm_err(&ptdev->base, "Failed to stop MCU");
}
@@ -1089,8 +1089,9 @@ void panthor_fw_pre_reset(struct panthor_device *ptdev, bool on_hang)
panthor_fw_update_reqs(glb_iface, req, GLB_HALT, GLB_HALT);
gpu_write(ptdev, CSF_DOORBELL(CSF_GLB_DOORBELL_ID), 1);
- if (!readl_poll_timeout(ptdev->iomem + MCU_STATUS, status,
- status == MCU_STATUS_HALT, 10, 100000)) {
+ if (!gpu_read_poll_timeout(ptdev, MCU_STATUS, status,
+ status == MCU_STATUS_HALT, 10,
+ 100000)) {
ptdev->reset.fast = true;
} else {
drm_warn(&ptdev->base, "Failed to cleanly suspend MCU");
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c
index 32d678a0114e..534735518824 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.c
+++ b/drivers/gpu/drm/panthor/panthor_gpu.c
@@ -108,14 +108,9 @@ static void panthor_gpu_init_info(struct panthor_device *ptdev)
ptdev->gpu_info.as_present = gpu_read(ptdev, GPU_AS_PRESENT);
- ptdev->gpu_info.shader_present = gpu_read(ptdev, GPU_SHADER_PRESENT_LO);
- ptdev->gpu_info.shader_present |= (u64)gpu_read(ptdev, GPU_SHADER_PRESENT_HI) << 32;
-
- ptdev->gpu_info.tiler_present = gpu_read(ptdev, GPU_TILER_PRESENT_LO);
- ptdev->gpu_info.tiler_present |= (u64)gpu_read(ptdev, GPU_TILER_PRESENT_HI) << 32;
-
- ptdev->gpu_info.l2_present = gpu_read(ptdev, GPU_L2_PRESENT_LO);
- ptdev->gpu_info.l2_present |= (u64)gpu_read(ptdev, GPU_L2_PRESENT_HI) << 32;
+ ptdev->gpu_info.shader_present = gpu_read64(ptdev, GPU_SHADER_PRESENT);
+ ptdev->gpu_info.tiler_present = gpu_read64(ptdev, GPU_TILER_PRESENT);
+ ptdev->gpu_info.l2_present = gpu_read64(ptdev, GPU_L2_PRESENT);
arch_major = GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id);
product_major = GPU_PROD_MAJOR(ptdev->gpu_info.gpu_id);
@@ -154,8 +149,7 @@ static void panthor_gpu_irq_handler(struct panthor_device *ptdev, u32 status)
if (status & GPU_IRQ_FAULT) {
u32 fault_status = gpu_read(ptdev, GPU_FAULT_STATUS);
- u64 address = ((u64)gpu_read(ptdev, GPU_FAULT_ADDR_HI) << 32) |
- gpu_read(ptdev, GPU_FAULT_ADDR_LO);
+ u64 address = gpu_read64(ptdev, GPU_FAULT_ADDR);
drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
fault_status, panthor_exception_name(ptdev, fault_status & 0xFF),
@@ -246,45 +240,27 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
u32 pwroff_reg, u32 pwrtrans_reg,
u64 mask, u32 timeout_us)
{
- u32 val, i;
+ u32 val;
int ret;
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
-
- ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
- val, !(mask32 & val),
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
- blk_name, mask);
- return ret;
- }
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
+ !(mask & val), 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base,
+ "timeout waiting on %s:%llx power transition", blk_name,
+ mask);
+ return ret;
}
- if (mask & GENMASK(31, 0))
- gpu_write(ptdev, pwroff_reg, mask);
+ gpu_write64(ptdev, pwroff_reg, mask);
- if (mask >> 32)
- gpu_write(ptdev, pwroff_reg + 4, mask >> 32);
-
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
-
- ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
- val, !(mask32 & val),
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
- blk_name, mask);
- return ret;
- }
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
+ !(mask & val), 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base,
+ "timeout waiting on %s:%llx power transition", blk_name,
+ mask);
+ return ret;
}
return 0;
@@ -307,45 +283,26 @@ int panthor_gpu_block_power_on(struct panthor_device *ptdev,
u32 pwron_reg, u32 pwrtrans_reg,
u32 rdy_reg, u64 mask, u32 timeout_us)
{
- u32 val, i;
+ u32 val;
int ret;
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
-
- ret = readl_relaxed_poll_timeout(ptdev->iomem + pwrtrans_reg + (i * 4),
- val, !(mask32 & val),
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx power transition",
- blk_name, mask);
- return ret;
- }
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
+ !(mask & val), 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base,
+ "timeout waiting on %s:%llx power transition", blk_name,
+ mask);
+ return ret;
}
- if (mask & GENMASK(31, 0))
- gpu_write(ptdev, pwron_reg, mask);
-
- if (mask >> 32)
- gpu_write(ptdev, pwron_reg + 4, mask >> 32);
-
- for (i = 0; i < 2; i++) {
- u32 mask32 = mask >> (i * 32);
-
- if (!mask32)
- continue;
+ gpu_write64(ptdev, pwron_reg, mask);
- ret = readl_relaxed_poll_timeout(ptdev->iomem + rdy_reg + (i * 4),
- val, (mask32 & val) == mask32,
- 100, timeout_us);
- if (ret) {
- drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
- blk_name, mask);
- return ret;
- }
+ ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val,
+ !(mask & val), 100, timeout_us);
+ if (ret) {
+ drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness",
+ blk_name, mask);
+ return ret;
}
return 0;
@@ -494,49 +451,3 @@ void panthor_gpu_resume(struct panthor_device *ptdev)
panthor_gpu_l2_power_on(ptdev);
}
-/**
- * panthor_gpu_read_64bit_counter() - Read a 64-bit counter at a given offset.
- * @ptdev: Device.
- * @reg: The offset of the register to read.
- *
- * Return: The counter value.
- */
-static u64
-panthor_gpu_read_64bit_counter(struct panthor_device *ptdev, u32 reg)
-{
- u32 hi, lo;
-
- do {
- hi = gpu_read(ptdev, reg + 0x4);
- lo = gpu_read(ptdev, reg);
- } while (hi != gpu_read(ptdev, reg + 0x4));
-
- return ((u64)hi << 32) | lo;
-}
-
-/**
- * panthor_gpu_read_timestamp() - Read the timestamp register.
- * @ptdev: Device.
- *
- * Return: The GPU timestamp value.
- */
-u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev)
-{
- return panthor_gpu_read_64bit_counter(ptdev, GPU_TIMESTAMP_LO);
-}
-
-/**
- * panthor_gpu_read_timestamp_offset() - Read the timestamp offset register.
- * @ptdev: Device.
- *
- * Return: The GPU timestamp offset value.
- */
-u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev)
-{
- u32 hi, lo;
-
- hi = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_HI);
- lo = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_LO);
-
- return ((u64)hi << 32) | lo;
-}
diff --git a/drivers/gpu/drm/panthor/panthor_gpu.h b/drivers/gpu/drm/panthor/panthor_gpu.h
index 7f6133a66127..7c17a8c06858 100644
--- a/drivers/gpu/drm/panthor/panthor_gpu.h
+++ b/drivers/gpu/drm/panthor/panthor_gpu.h
@@ -30,9 +30,9 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
*/
#define panthor_gpu_power_on(ptdev, type, mask, timeout_us) \
panthor_gpu_block_power_on(ptdev, #type, \
- type ## _PWRON_LO, \
- type ## _PWRTRANS_LO, \
- type ## _READY_LO, \
+ type ## _PWRON, \
+ type ## _PWRTRANS, \
+ type ## _READY, \
mask, timeout_us)
/**
@@ -42,15 +42,13 @@ int panthor_gpu_block_power_off(struct panthor_device *ptdev,
*/
#define panthor_gpu_power_off(ptdev, type, mask, timeout_us) \
panthor_gpu_block_power_off(ptdev, #type, \
- type ## _PWROFF_LO, \
- type ## _PWRTRANS_LO, \
+ type ## _PWROFF, \
+ type ## _PWRTRANS, \
mask, timeout_us)
int panthor_gpu_l2_power_on(struct panthor_device *ptdev);
int panthor_gpu_flush_caches(struct panthor_device *ptdev,
u32 l2, u32 lsc, u32 other);
int panthor_gpu_soft_reset(struct panthor_device *ptdev);
-u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev);
-u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev);
#endif
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 6ca9a2642a4e..b39ea6acc6a9 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -510,9 +510,9 @@ static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
/* Wait for the MMU status to indicate there is no active command, in
* case one is pending.
*/
- ret = readl_relaxed_poll_timeout_atomic(ptdev->iomem + AS_STATUS(as_nr),
- val, !(val & AS_STATUS_AS_ACTIVE),
- 10, 100000);
+ ret = gpu_read_relaxed_poll_timeout_atomic(ptdev, AS_STATUS(as_nr), val,
+ !(val & AS_STATUS_AS_ACTIVE),
+ 10, 100000);
if (ret) {
panthor_device_schedule_reset(ptdev);
@@ -564,8 +564,7 @@ static void lock_region(struct panthor_device *ptdev, u32 as_nr,
region = region_width | region_start;
/* Lock the region that needs to be updated */
- gpu_write(ptdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
- gpu_write(ptdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
+ gpu_write64(ptdev, AS_LOCKADDR(as_nr), region);
write_cmd(ptdev, as_nr, AS_COMMAND_LOCK);
}
@@ -615,14 +614,9 @@ static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr,
if (ret)
return ret;
- gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
- gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
-
- gpu_write(ptdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
- gpu_write(ptdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
-
- gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
- gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));
+ gpu_write64(ptdev, AS_TRANSTAB(as_nr), transtab);
+ gpu_write64(ptdev, AS_MEMATTR(as_nr), memattr);
+ gpu_write64(ptdev, AS_TRANSCFG(as_nr), transcfg);
return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
}
@@ -635,14 +629,9 @@ static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr)
if (ret)
return ret;
- gpu_write(ptdev, AS_TRANSTAB_LO(as_nr), 0);
- gpu_write(ptdev, AS_TRANSTAB_HI(as_nr), 0);
-
- gpu_write(ptdev, AS_MEMATTR_LO(as_nr), 0);
- gpu_write(ptdev, AS_MEMATTR_HI(as_nr), 0);
-
- gpu_write(ptdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
- gpu_write(ptdev, AS_TRANSCFG_HI(as_nr), 0);
+ gpu_write64(ptdev, AS_TRANSTAB(as_nr), 0);
+ gpu_write64(ptdev, AS_MEMATTR(as_nr), 0);
+ gpu_write64(ptdev, AS_TRANSCFG(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE);
}
@@ -1681,8 +1670,7 @@ static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status)
u32 source_id;
fault_status = gpu_read(ptdev, AS_FAULTSTATUS(as));
- addr = gpu_read(ptdev, AS_FAULTADDRESS_LO(as));
- addr |= (u64)gpu_read(ptdev, AS_FAULTADDRESS_HI(as)) << 32;
+ addr = gpu_read64(ptdev, AS_FAULTADDRESS(as));
/* decode the fault status */
exception_type = fault_status & 0xFF;
@@ -2523,7 +2511,7 @@ panthor_vm_bind_job_create(struct drm_file *file,
kref_init(&job->refcount);
job->vm = panthor_vm_get(vm);
- ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm);
+ ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm, file->client_id);
if (ret)
goto err_put_job;
diff --git a/drivers/gpu/drm/panthor/panthor_regs.h b/drivers/gpu/drm/panthor/panthor_regs.h
index a7a323dc5cf9..48bbfd40138c 100644
--- a/drivers/gpu/drm/panthor/panthor_regs.h
+++ b/drivers/gpu/drm/panthor/panthor_regs.h
@@ -63,20 +63,16 @@
#define GPU_STATUS_DBG_ENABLED BIT(8)
#define GPU_FAULT_STATUS 0x3C
-#define GPU_FAULT_ADDR_LO 0x40
-#define GPU_FAULT_ADDR_HI 0x44
+#define GPU_FAULT_ADDR 0x40
#define GPU_PWR_KEY 0x50
#define GPU_PWR_KEY_UNLOCK 0x2968A819
#define GPU_PWR_OVERRIDE0 0x54
#define GPU_PWR_OVERRIDE1 0x58
-#define GPU_TIMESTAMP_OFFSET_LO 0x88
-#define GPU_TIMESTAMP_OFFSET_HI 0x8C
-#define GPU_CYCLE_COUNT_LO 0x90
-#define GPU_CYCLE_COUNT_HI 0x94
-#define GPU_TIMESTAMP_LO 0x98
-#define GPU_TIMESTAMP_HI 0x9C
+#define GPU_TIMESTAMP_OFFSET 0x88
+#define GPU_CYCLE_COUNT 0x90
+#define GPU_TIMESTAMP 0x98
#define GPU_THREAD_MAX_THREADS 0xA0
#define GPU_THREAD_MAX_WORKGROUP_SIZE 0xA4
@@ -85,47 +81,29 @@
#define GPU_TEXTURE_FEATURES(n) (0xB0 + ((n) * 4))
-#define GPU_SHADER_PRESENT_LO 0x100
-#define GPU_SHADER_PRESENT_HI 0x104
-#define GPU_TILER_PRESENT_LO 0x110
-#define GPU_TILER_PRESENT_HI 0x114
-#define GPU_L2_PRESENT_LO 0x120
-#define GPU_L2_PRESENT_HI 0x124
-
-#define SHADER_READY_LO 0x140
-#define SHADER_READY_HI 0x144
-#define TILER_READY_LO 0x150
-#define TILER_READY_HI 0x154
-#define L2_READY_LO 0x160
-#define L2_READY_HI 0x164
-
-#define SHADER_PWRON_LO 0x180
-#define SHADER_PWRON_HI 0x184
-#define TILER_PWRON_LO 0x190
-#define TILER_PWRON_HI 0x194
-#define L2_PWRON_LO 0x1A0
-#define L2_PWRON_HI 0x1A4
-
-#define SHADER_PWROFF_LO 0x1C0
-#define SHADER_PWROFF_HI 0x1C4
-#define TILER_PWROFF_LO 0x1D0
-#define TILER_PWROFF_HI 0x1D4
-#define L2_PWROFF_LO 0x1E0
-#define L2_PWROFF_HI 0x1E4
-
-#define SHADER_PWRTRANS_LO 0x200
-#define SHADER_PWRTRANS_HI 0x204
-#define TILER_PWRTRANS_LO 0x210
-#define TILER_PWRTRANS_HI 0x214
-#define L2_PWRTRANS_LO 0x220
-#define L2_PWRTRANS_HI 0x224
-
-#define SHADER_PWRACTIVE_LO 0x240
-#define SHADER_PWRACTIVE_HI 0x244
-#define TILER_PWRACTIVE_LO 0x250
-#define TILER_PWRACTIVE_HI 0x254
-#define L2_PWRACTIVE_LO 0x260
-#define L2_PWRACTIVE_HI 0x264
+#define GPU_SHADER_PRESENT 0x100
+#define GPU_TILER_PRESENT 0x110
+#define GPU_L2_PRESENT 0x120
+
+#define SHADER_READY 0x140
+#define TILER_READY 0x150
+#define L2_READY 0x160
+
+#define SHADER_PWRON 0x180
+#define TILER_PWRON 0x190
+#define L2_PWRON 0x1A0
+
+#define SHADER_PWROFF 0x1C0
+#define TILER_PWROFF 0x1D0
+#define L2_PWROFF 0x1E0
+
+#define SHADER_PWRTRANS 0x200
+#define TILER_PWRTRANS 0x210
+#define L2_PWRTRANS 0x220
+
+#define SHADER_PWRACTIVE 0x240
+#define TILER_PWRACTIVE 0x250
+#define L2_PWRACTIVE 0x260
#define GPU_REVID 0x280
@@ -168,10 +146,8 @@
#define MMU_AS_SHIFT 6
#define MMU_AS(as) (MMU_BASE + ((as) << MMU_AS_SHIFT))
-#define AS_TRANSTAB_LO(as) (MMU_AS(as) + 0x0)
-#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x4)
-#define AS_MEMATTR_LO(as) (MMU_AS(as) + 0x8)
-#define AS_MEMATTR_HI(as) (MMU_AS(as) + 0xC)
+#define AS_TRANSTAB(as) (MMU_AS(as) + 0x0)
+#define AS_MEMATTR(as) (MMU_AS(as) + 0x8)
#define AS_MEMATTR_AARCH64_INNER_ALLOC_IMPL (2 << 2)
#define AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(w, r) ((3 << 2) | \
((w) ? BIT(0) : 0) | \
@@ -183,8 +159,7 @@
#define AS_MEMATTR_AARCH64_INNER_OUTER_NC (1 << 6)
#define AS_MEMATTR_AARCH64_INNER_OUTER_WB (2 << 6)
#define AS_MEMATTR_AARCH64_FAULT (3 << 6)
-#define AS_LOCKADDR_LO(as) (MMU_AS(as) + 0x10)
-#define AS_LOCKADDR_HI(as) (MMU_AS(as) + 0x14)
+#define AS_LOCKADDR(as) (MMU_AS(as) + 0x10)
#define AS_COMMAND(as) (MMU_AS(as) + 0x18)
#define AS_COMMAND_NOP 0
#define AS_COMMAND_UPDATE 1
@@ -199,12 +174,10 @@
#define AS_FAULTSTATUS_ACCESS_TYPE_EX (0x1 << 8)
#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
-#define AS_FAULTADDRESS_LO(as) (MMU_AS(as) + 0x20)
-#define AS_FAULTADDRESS_HI(as) (MMU_AS(as) + 0x24)
+#define AS_FAULTADDRESS(as) (MMU_AS(as) + 0x20)
#define AS_STATUS(as) (MMU_AS(as) + 0x28)
#define AS_STATUS_AS_ACTIVE BIT(0)
-#define AS_TRANSCFG_LO(as) (MMU_AS(as) + 0x30)
-#define AS_TRANSCFG_HI(as) (MMU_AS(as) + 0x34)
+#define AS_TRANSCFG(as) (MMU_AS(as) + 0x30)
#define AS_TRANSCFG_ADRMODE_UNMAPPED (1 << 0)
#define AS_TRANSCFG_ADRMODE_IDENTITY (2 << 0)
#define AS_TRANSCFG_ADRMODE_AARCH64_4K (6 << 0)
@@ -222,18 +195,11 @@
#define AS_TRANSCFG_DISABLE_AF_FAULT BIT(34)
#define AS_TRANSCFG_WXN BIT(35)
#define AS_TRANSCFG_XREADABLE BIT(36)
-#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38)
-#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C)
+#define AS_FAULTEXTRA(as) (MMU_AS(as) + 0x38)
#define CSF_GPU_LATEST_FLUSH_ID 0x10000
#define CSF_DOORBELL(i) (0x80000 + ((i) * 0x10000))
#define CSF_GLB_DOORBELL_ID 0
-#define gpu_write(dev, reg, data) \
- writel(data, (dev)->iomem + (reg))
-
-#define gpu_read(dev, reg) \
- readl((dev)->iomem + (reg))
-
#endif
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index 43ee57728de5..a2248f692a03 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -3732,7 +3732,8 @@ struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job)
struct drm_sched_job *
panthor_job_create(struct panthor_file *pfile,
u16 group_handle,
- const struct drm_panthor_queue_submit *qsubmit)
+ const struct drm_panthor_queue_submit *qsubmit,
+ u64 drm_client_id)
{
struct panthor_group_pool *gpool = pfile->groups;
struct panthor_job *job;
@@ -3804,7 +3805,7 @@ panthor_job_create(struct panthor_file *pfile,
ret = drm_sched_job_init(&job->base,
&job->group->queues[job->queue_idx]->entity,
- credits, job->group);
+ credits, job->group, drm_client_id);
if (ret)
goto err_put_job;
diff --git a/drivers/gpu/drm/panthor/panthor_sched.h b/drivers/gpu/drm/panthor/panthor_sched.h
index e650a445cf50..742b0b4ff3a3 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.h
+++ b/drivers/gpu/drm/panthor/panthor_sched.h
@@ -29,7 +29,8 @@ int panthor_group_get_state(struct panthor_file *pfile,
struct drm_sched_job *
panthor_job_create(struct panthor_file *pfile,
u16 group_handle,
- const struct drm_panthor_queue_submit *qsubmit);
+ const struct drm_panthor_queue_submit *qsubmit,
+ u64 drm_client_id);
struct drm_sched_job *panthor_job_get(struct drm_sched_job *job);
struct panthor_vm *panthor_job_vm(struct drm_sched_job *sched_job);
void panthor_job_put(struct drm_sched_job *job);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h
index f9893d7d6dfc..e9e59c5e70d5 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_plane.h
@@ -16,7 +16,7 @@ struct rcar_du_format_info;
struct rcar_du_group;
/*
- * The RCAR DU has 8 hardware planes, shared between primary and overlay planes.
+ * The R-Car DU has 8 hardware planes, shared between primary and overlay planes.
* As using overlay planes requires at least one of the CRTCs being enabled, no
* more than 7 overlay planes can be available. We thus create 1 primary plane
* per CRTC and 7 overlay planes, for a total of up to 9 KMS planes.
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
index a9145253294f..af58b814e588 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c
@@ -878,9 +878,10 @@ static int rcar_lvds_probe(struct platform_device *pdev)
struct rcar_lvds *lvds;
int ret;
- lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
- if (lvds == NULL)
- return -ENOMEM;
+ lvds = devm_drm_bridge_alloc(&pdev->dev, struct rcar_lvds, bridge,
+ &rcar_lvds_bridge_ops);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
platform_set_drvdata(pdev, lvds);
@@ -895,7 +896,6 @@ static int rcar_lvds_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- lvds->bridge.funcs = &rcar_lvds_bridge_ops;
lvds->bridge.of_node = pdev->dev.of_node;
lvds->mmio = devm_platform_ioremap_resource(pdev, 0);
diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
index 7ab8be46c7f6..1af4c73f7a88 100644
--- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c
@@ -918,7 +918,6 @@ static int rcar_mipi_dsi_host_attach(struct mipi_dsi_host *host,
}
/* Initialize the DRM bridge. */
- dsi->bridge.funcs = &rcar_mipi_dsi_bridge_ops;
dsi->bridge.of_node = dsi->dev->of_node;
drm_bridge_add(&dsi->bridge);
@@ -1004,9 +1003,10 @@ static int rcar_mipi_dsi_probe(struct platform_device *pdev)
struct rcar_mipi_dsi *dsi;
int ret;
- dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
- if (dsi == NULL)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct rcar_mipi_dsi, bridge,
+ &rcar_mipi_dsi_bridge_ops);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
platform_set_drvdata(pdev, dsi);
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
index 5e40f0c1e7b0..e1aa6a719529 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_drv.c
@@ -50,9 +50,20 @@ static const struct rzg2l_du_device_info rzg2l_du_r9a07g044_info = {
}
};
+static const struct rzg2l_du_device_info rzg2l_du_r9a09g057_info = {
+ .channels_mask = BIT(0),
+ .routes = {
+ [RZG2L_DU_OUTPUT_DSI0] = {
+ .possible_outputs = BIT(0),
+ .port = 0,
+ },
+ },
+};
+
static const struct of_device_id rzg2l_du_of_table[] = {
{ .compatible = "renesas,r9a07g043u-du", .data = &rzg2l_du_r9a07g043u_info },
{ .compatible = "renesas,r9a07g044-du", .data = &rzg2l_du_r9a07g044_info },
+ { .compatible = "renesas,r9a09g057-du", .data = &rzg2l_du_r9a09g057_info },
{ /* sentinel */ }
};
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
index 564ab4cb3d37..5e6dd16705e6 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_encoder.c
@@ -22,6 +22,26 @@
* Encoder
*/
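+/*
+ * Count the OF graph ports of @node: look under a "ports" child node if one
+ * exists, otherwise count "port" children of @node itself.
+ */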
+static unsigned int rzg2l_du_encoder_count_ports(struct device_node *node)
+{
+ struct device_node *ports;
+ struct device_node *port;
+ unsigned int num_ports = 0;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = of_node_get(node);
+
+ for_each_child_of_node(ports, port) {
+ if (of_node_name_eq(port, "port"))
+ num_ports++;
+ }
+
+ of_node_put(ports);
+
+ return num_ports;
+}
+
static const struct drm_encoder_funcs rzg2l_du_encoder_funcs = {
};
@@ -50,10 +70,26 @@ int rzg2l_du_encoder_init(struct rzg2l_du_device *rcdu,
struct drm_bridge *bridge;
int ret;
- /* Locate the DRM bridge from the DT node. */
- bridge = of_drm_find_bridge(enc_node);
- if (!bridge)
- return -EPROBE_DEFER;
+ /*
+ * Locate the DRM bridge from the DT node. For the DPAD outputs, if the
+ * DT node has a single port, assume that it describes a panel and
+ * create a panel bridge.
+ */
+ if (output == RZG2L_DU_OUTPUT_DPAD0 && rzg2l_du_encoder_count_ports(enc_node) == 1) {
+ struct drm_panel *panel = of_drm_find_panel(enc_node);
+
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+
+ bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ } else {
+ bridge = of_drm_find_bridge(enc_node);
+ if (!bridge)
+ return -EPROBE_DEFER;
+ }
dev_dbg(rcdu->dev, "initializing encoder %pOF for output %s\n",
enc_node, rzg2l_du_output_name(output));
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
index dc6ab012cdb6..f87337c3cbb5 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c
@@ -4,10 +4,14 @@
*
* Copyright (C) 2022 Renesas Electronics Corporation
*/
+
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
@@ -15,6 +19,7 @@
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>
+#include <linux/units.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
@@ -23,13 +28,37 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <video/mipi_display.h>
#include "rzg2l_mipi_dsi_regs.h"
+#define RZG2L_DCS_BUF_SIZE 128 /* Maximum DCS buffer size in external memory. */
+
+#define RZ_MIPI_DSI_FEATURE_16BPP BIT(0)
+
+struct rzg2l_mipi_dsi;
+
+struct rzg2l_mipi_dsi_hw_info {
+ int (*dphy_init)(struct rzg2l_mipi_dsi *dsi, u64 hsfreq_millihz);
+ void (*dphy_startup_late_init)(struct rzg2l_mipi_dsi *dsi);
+ void (*dphy_exit)(struct rzg2l_mipi_dsi *dsi);
+ int (*dphy_conf_clks)(struct rzg2l_mipi_dsi *dsi, unsigned long mode_freq,
+ u64 *hsfreq_millihz);
+ unsigned int (*dphy_mode_clk_check)(struct rzg2l_mipi_dsi *dsi,
+ unsigned long mode_freq);
+ u32 phy_reg_offset;
+ u32 link_reg_offset;
+ unsigned long min_dclk;
+ unsigned long max_dclk;
+ u8 features;
+};
+
struct rzg2l_mipi_dsi {
struct device *dev;
void __iomem *mmio;
+ const struct rzg2l_mipi_dsi_hw_info *info;
+
struct reset_control *rstc;
struct reset_control *arstc;
struct reset_control *prstc;
@@ -44,6 +73,10 @@ struct rzg2l_mipi_dsi {
unsigned int num_data_lanes;
unsigned int lanes;
unsigned long mode_flags;
+
+ /* DCS buffer pointers when using external memory. */
+ dma_addr_t dcs_buf_phys;
+ u8 *dcs_buf_virt;
};
static inline struct rzg2l_mipi_dsi *
@@ -75,7 +108,7 @@ struct rzg2l_mipi_dsi_timings {
static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
{
- .hsfreq_max = 80000,
+ .hsfreq_max = 80000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 13,
@@ -89,7 +122,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 125000,
+ .hsfreq_max = 125000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 12,
@@ -103,7 +136,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 250000,
+ .hsfreq_max = 250000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 12,
@@ -117,7 +150,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 360000,
+ .hsfreq_max = 360000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 10,
@@ -131,7 +164,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 720000,
+ .hsfreq_max = 720000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 9,
@@ -145,7 +178,7 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
.tlpx = 6,
},
{
- .hsfreq_max = 1500000,
+ .hsfreq_max = 1500000000,
.t_init = 79801,
.tclk_prepare = 8,
.ths_prepare = 9,
@@ -162,22 +195,22 @@ static const struct rzg2l_mipi_dsi_timings rzg2l_mipi_dsi_global_timings[] = {
static void rzg2l_mipi_dsi_phy_write(struct rzg2l_mipi_dsi *dsi, u32 reg, u32 data)
{
- iowrite32(data, dsi->mmio + reg);
+ iowrite32(data, dsi->mmio + dsi->info->phy_reg_offset + reg);
}
static void rzg2l_mipi_dsi_link_write(struct rzg2l_mipi_dsi *dsi, u32 reg, u32 data)
{
- iowrite32(data, dsi->mmio + LINK_REG_OFFSET + reg);
+ iowrite32(data, dsi->mmio + dsi->info->link_reg_offset + reg);
}
static u32 rzg2l_mipi_dsi_phy_read(struct rzg2l_mipi_dsi *dsi, u32 reg)
{
- return ioread32(dsi->mmio + reg);
+ return ioread32(dsi->mmio + dsi->info->phy_reg_offset + reg);
}
static u32 rzg2l_mipi_dsi_link_read(struct rzg2l_mipi_dsi *dsi, u32 reg)
{
- return ioread32(dsi->mmio + LINK_REG_OFFSET + reg);
+ return ioread32(dsi->mmio + dsi->info->link_reg_offset + reg);
}
/* -----------------------------------------------------------------------------
@@ -185,8 +218,9 @@ static u32 rzg2l_mipi_dsi_link_read(struct rzg2l_mipi_dsi *dsi, u32 reg)
*/
static int rzg2l_mipi_dsi_dphy_init(struct rzg2l_mipi_dsi *dsi,
- unsigned long hsfreq)
+ u64 hsfreq_millihz)
{
+ unsigned long hsfreq = DIV_ROUND_CLOSEST_ULL(hsfreq_millihz, MILLI);
const struct rzg2l_mipi_dsi_timings *dphy_timings;
unsigned int i;
u32 dphyctrl0;
@@ -255,20 +289,17 @@ static void rzg2l_mipi_dsi_dphy_exit(struct rzg2l_mipi_dsi *dsi)
reset_control_assert(dsi->rstc);
}
-static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
- const struct drm_display_mode *mode)
+static int rzg2l_dphy_conf_clks(struct rzg2l_mipi_dsi *dsi, unsigned long mode_freq,
+ u64 *hsfreq_millihz)
{
- unsigned long hsfreq;
+ unsigned long vclk_rate;
unsigned int bpp;
- u32 txsetr;
- u32 clstptsetr;
- u32 lptrnstsetr;
- u32 clkkpt;
- u32 clkbfht;
- u32 clkstpt;
- u32 golpbkt;
- int ret;
+ clk_set_rate(dsi->vclk, mode_freq * KILO);
+ vclk_rate = clk_get_rate(dsi->vclk);
+ if (vclk_rate != mode_freq * KILO)
+ dev_dbg(dsi->dev, "vclk rate mismatch: requested %lu, got %lu\n",
+ mode_freq * KILO, vclk_rate);
/*
* Relationship between hsclk and vclk must follow
* vclk * bpp = hsclk * 8 * lanes
@@ -277,18 +308,39 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
* hsclk: DSI HS Byte clock frequency (Hz)
* lanes: number of data lanes
*
- * hsclk(bit) = hsclk(byte) * 8
+ * hsclk(bit) = hsclk(byte) * 8 = hsfreq
*/
bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
- hsfreq = (mode->clock * bpp * 8) / (8 * dsi->lanes);
+ *hsfreq_millihz = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(vclk_rate, bpp * MILLI),
+ dsi->lanes);
+
+ return 0;
+}
+
+static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
+ const struct drm_display_mode *mode)
+{
+ unsigned long hsfreq;
+ u64 hsfreq_millihz;
+ u32 txsetr;
+ u32 clstptsetr;
+ u32 lptrnstsetr;
+ u32 clkkpt;
+ u32 clkbfht;
+ u32 clkstpt;
+ u32 golpbkt;
+ u32 dsisetr;
+ int ret;
ret = pm_runtime_resume_and_get(dsi->dev);
if (ret < 0)
return ret;
- clk_set_rate(dsi->vclk, mode->clock * 1000);
+ ret = dsi->info->dphy_conf_clks(dsi, mode->clock, &hsfreq_millihz);
+ if (ret < 0)
+ goto err_phy;
- ret = rzg2l_mipi_dsi_dphy_init(dsi, hsfreq);
+ ret = dsi->info->dphy_init(dsi, hsfreq_millihz);
if (ret < 0)
goto err_phy;
@@ -296,6 +348,10 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
txsetr = TXSETR_DLEN | TXSETR_NUMLANEUSE(dsi->lanes - 1) | TXSETR_CLEN;
rzg2l_mipi_dsi_link_write(dsi, TXSETR, txsetr);
+ if (dsi->info->dphy_startup_late_init)
+ dsi->info->dphy_startup_late_init(dsi);
+
+ hsfreq = DIV_ROUND_CLOSEST_ULL(hsfreq_millihz, MILLI);
/*
* Global timings characteristic depends on high speed Clock Frequency
* Currently the MIPI DSI-IF supports at most FHD@60 with:
@@ -304,12 +360,12 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
* - data lanes: maximum 4 lanes
* Therefore maximum hsclk will be 891 Mbps.
*/
- if (hsfreq > 445500) {
+ if (hsfreq > 445500000) {
clkkpt = 12;
clkbfht = 15;
clkstpt = 48;
golpbkt = 75;
- } else if (hsfreq > 250000) {
+ } else if (hsfreq > 250000000) {
clkkpt = 7;
clkbfht = 8;
clkstpt = 27;
@@ -328,10 +384,19 @@ static int rzg2l_mipi_dsi_startup(struct rzg2l_mipi_dsi *dsi,
lptrnstsetr = LPTRNSTSETR_GOLPBKT(golpbkt);
rzg2l_mipi_dsi_link_write(dsi, LPTRNSTSETR, lptrnstsetr);
+ /*
+ * Increase MRPSZ as the default value of 1 will result in long read
+ * commands payload not being saved to memory.
+ */
+ dsisetr = rzg2l_mipi_dsi_link_read(dsi, DSISETR);
+ dsisetr &= ~DSISETR_MRPSZ;
+ dsisetr |= FIELD_PREP(DSISETR_MRPSZ, RZG2L_DCS_BUF_SIZE);
+ rzg2l_mipi_dsi_link_write(dsi, DSISETR, dsisetr);
+
return 0;
err_phy:
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
return ret;
@@ -339,7 +404,7 @@ err_phy:
static void rzg2l_mipi_dsi_stop(struct rzg2l_mipi_dsi *dsi)
{
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
}
@@ -532,8 +597,8 @@ static int rzg2l_mipi_dsi_attach(struct drm_bridge *bridge,
flags);
}
-static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void rzg2l_mipi_dsi_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
{
struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
const struct drm_display_mode *mode;
@@ -550,6 +615,13 @@ static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
return;
rzg2l_mipi_dsi_set_display_timing(dsi, mode);
+}
+
+static void rzg2l_mipi_dsi_atomic_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state)
+{
+ struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
+ int ret;
ret = rzg2l_mipi_dsi_start_hs_clock(dsi);
if (ret < 0)
@@ -582,9 +654,22 @@ rzg2l_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
- if (mode->clock > 148500)
+ struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge);
+
+ if (mode->clock > dsi->info->max_dclk)
return MODE_CLOCK_HIGH;
+ if (mode->clock < dsi->info->min_dclk)
+ return MODE_CLOCK_LOW;
+
+ if (dsi->info->dphy_mode_clk_check) {
+ enum drm_mode_status status;
+
+ status = dsi->info->dphy_mode_clk_check(dsi, mode->clock);
+ if (status != MODE_OK)
+ return status;
+ }
+
return MODE_OK;
}
@@ -593,6 +678,7 @@ static const struct drm_bridge_funcs rzg2l_mipi_dsi_bridge_ops = {
.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
.atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_pre_enable = rzg2l_mipi_dsi_atomic_pre_enable,
.atomic_enable = rzg2l_mipi_dsi_atomic_enable,
.atomic_disable = rzg2l_mipi_dsi_atomic_disable,
.mode_valid = rzg2l_mipi_dsi_bridge_mode_valid,
@@ -617,8 +703,16 @@ static int rzg2l_mipi_dsi_host_attach(struct mipi_dsi_host *host,
switch (mipi_dsi_pixel_format_to_bpp(device->format)) {
case 24:
+ break;
case 18:
break;
+ case 16:
+ if (!(dsi->info->features & RZ_MIPI_DSI_FEATURE_16BPP)) {
+ dev_err(dsi->dev, "Unsupported format 0x%04x\n",
+ device->format);
+ return -EINVAL;
+ }
+ break;
default:
dev_err(dsi->dev, "Unsupported format 0x%04x\n", device->format);
return -EINVAL;
@@ -651,9 +745,168 @@ static int rzg2l_mipi_dsi_host_detach(struct mipi_dsi_host *host,
return 0;
}
+static ssize_t rzg2l_mipi_dsi_read_response(struct rzg2l_mipi_dsi *dsi,
+ const struct mipi_dsi_msg *msg)
+{
+ u8 *msg_rx = msg->rx_buf;
+ u8 datatype;
+ u32 result;
+ u16 size;
+
+ result = rzg2l_mipi_dsi_link_read(dsi, RXRSS0R);
+ if (result & RXRSS0R_RXPKTDFAIL) {
+ dev_err(dsi->dev, "packet rx data did not save correctly\n");
+ return -EPROTO;
+ }
+
+ if (result & RXRSS0R_RXFAIL) {
+ dev_err(dsi->dev, "packet rx failure\n");
+ return -EPROTO;
+ }
+
+ if (!(result & RXRSS0R_RXSUC))
+ return -EPROTO;
+
+ datatype = FIELD_GET(RXRSS0R_DT, result);
+
+ switch (datatype) {
+ case 0:
+ dev_dbg(dsi->dev, "ACK\n");
+ return 0;
+ case MIPI_DSI_RX_END_OF_TRANSMISSION:
+ dev_dbg(dsi->dev, "EoTp\n");
+ return 0;
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ dev_dbg(dsi->dev, "Acknowledge and error report: $%02x%02x\n",
+ (u8)FIELD_GET(RXRSS0R_DATA1, result),
+ (u8)FIELD_GET(RXRSS0R_DATA0, result));
+ return 0;
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ msg_rx[0] = FIELD_GET(RXRSS0R_DATA0, result);
+ return 1;
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ msg_rx[0] = FIELD_GET(RXRSS0R_DATA0, result);
+ msg_rx[1] = FIELD_GET(RXRSS0R_DATA1, result);
+ return 2;
+ case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+ case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+ size = FIELD_GET(RXRSS0R_WC, result);
+
+ if (size > msg->rx_len) {
+ dev_err(dsi->dev, "rx buffer too small");
+ return -ENOSPC;
+ }
+
+ memcpy(msg_rx, dsi->dcs_buf_virt, size);
+ return size;
+ default:
+ dev_err(dsi->dev, "unhandled response type: %02x\n", datatype);
+ return -EPROTO;
+ }
+}
+
+static ssize_t rzg2l_mipi_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct rzg2l_mipi_dsi *dsi = host_to_rzg2l_mipi_dsi(host);
+ struct mipi_dsi_packet packet;
+ bool need_bta;
+ u32 value;
+ int ret;
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret < 0)
+ return ret;
+
+ /* Terminate operation after this descriptor is finished */
+ value = SQCH0DSC0AR_NXACT_TERM;
+
+ if (msg->flags & MIPI_DSI_MSG_REQ_ACK) {
+ need_bta = true; /* Message with explicitly requested ACK */
+ value |= FIELD_PREP(SQCH0DSC0AR_BTA, SQCH0DSC0AR_BTA_NON_READ);
+ } else if (msg->rx_buf && msg->rx_len > 0) {
+ need_bta = true; /* Read request */
+ value |= FIELD_PREP(SQCH0DSC0AR_BTA, SQCH0DSC0AR_BTA_READ);
+ } else {
+ need_bta = false;
+ value |= FIELD_PREP(SQCH0DSC0AR_BTA, SQCH0DSC0AR_BTA_NONE);
+ }
+
+ /* Set transmission speed */
+ if (msg->flags & MIPI_DSI_MSG_USE_LPM)
+ value |= SQCH0DSC0AR_SPD_LOW;
+ else
+ value |= SQCH0DSC0AR_SPD_HIGH;
+
+ /* Write TX packet header */
+ value |= FIELD_PREP(SQCH0DSC0AR_DT, packet.header[0]) |
+ FIELD_PREP(SQCH0DSC0AR_DATA0, packet.header[1]) |
+ FIELD_PREP(SQCH0DSC0AR_DATA1, packet.header[2]);
+
+ if (mipi_dsi_packet_format_is_long(msg->type)) {
+ value |= SQCH0DSC0AR_FMT_LONG;
+
+ if (packet.payload_length > RZG2L_DCS_BUF_SIZE) {
+ dev_err(dsi->dev, "Packet Tx payload size (%d) too large",
+ (unsigned int)packet.payload_length);
+ return -ENOSPC;
+ }
+
+ /* Copy TX packet payload data to memory space */
+ memcpy(dsi->dcs_buf_virt, packet.payload, packet.payload_length);
+ } else {
+ value |= SQCH0DSC0AR_FMT_SHORT;
+ }
+
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0AR, value);
+
+ /*
+ * Write: specify payload data source location, only used for
+ * long packet.
+ * Read: specify payload data storage location of response
+ * packet. Note: a read packet is always a short packet.
+ * If the response packet is a short packet or a long packet
+ * with WC = 0 (no payload), DTSEL is meaningless.
+ */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0BR, SQCH0DSC0BR_DTSEL_MEM_SPACE);
+
+ /*
+ * Set SQCHxSR.AACTFIN bit when descriptor actions are finished.
+ * Read: set Rx result save slot number to 0 (ACTCODE).
+ */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0CR, SQCH0DSC0CR_FINACT);
+
+ /* Set rx/tx payload data address, only relevant for long packet. */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0DSC0DR, (u32)dsi->dcs_buf_phys);
+
+ /* Start sequence 0 operation */
+ value = rzg2l_mipi_dsi_link_read(dsi, SQCH0SET0R);
+ value |= SQCH0SET0R_START;
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0SET0R, value);
+
+ /* Wait for operation to finish */
+ ret = read_poll_timeout(rzg2l_mipi_dsi_link_read,
+ value, value & SQCH0SR_ADESFIN,
+ 2000, 20000, false, dsi, SQCH0SR);
+ if (ret == 0) {
+ /* Success: clear status bit */
+ rzg2l_mipi_dsi_link_write(dsi, SQCH0SCR, SQCH0SCR_ADESFIN);
+
+ if (need_bta)
+ ret = rzg2l_mipi_dsi_read_response(dsi, msg);
+ else
+ ret = packet.payload_length;
+ }
+
+ return ret;
+}
+
static const struct mipi_dsi_host_ops rzg2l_mipi_dsi_host_ops = {
.attach = rzg2l_mipi_dsi_host_attach,
.detach = rzg2l_mipi_dsi_host_detach,
+ .transfer = rzg2l_mipi_dsi_host_transfer,
};
/* -----------------------------------------------------------------------------
@@ -701,13 +954,16 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
u32 txsetr;
int ret;
- dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
- if (!dsi)
- return -ENOMEM;
+ dsi = devm_drm_bridge_alloc(&pdev->dev, struct rzg2l_mipi_dsi, bridge,
+ &rzg2l_mipi_dsi_bridge_ops);
+ if (IS_ERR(dsi))
+ return PTR_ERR(dsi);
platform_set_drvdata(pdev, dsi);
dsi->dev = &pdev->dev;
+ dsi->info = of_device_get_match_data(&pdev->dev);
+
ret = drm_of_get_data_lanes_count_ep(dsi->dev->of_node, 1, 0, 1, 4);
if (ret < 0)
return dev_err_probe(dsi->dev, ret,
@@ -723,7 +979,7 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
if (IS_ERR(dsi->vclk))
return PTR_ERR(dsi->vclk);
- dsi->rstc = devm_reset_control_get_exclusive(dsi->dev, "rst");
+ dsi->rstc = devm_reset_control_get_optional_exclusive(dsi->dev, "rst");
if (IS_ERR(dsi->rstc))
return dev_err_probe(dsi->dev, PTR_ERR(dsi->rstc),
"failed to get rst\n");
@@ -751,17 +1007,16 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
* mode->clock and format are not available. So initialize DPHY with
* timing parameters for 80Mbps.
*/
- ret = rzg2l_mipi_dsi_dphy_init(dsi, 80000);
+ ret = dsi->info->dphy_init(dsi, 80000000ULL * MILLI);
if (ret < 0)
goto err_phy;
txsetr = rzg2l_mipi_dsi_link_read(dsi, TXSETR);
dsi->num_data_lanes = min(((txsetr >> 16) & 3) + 1, num_data_lanes);
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
/* Initialize the DRM bridge. */
- dsi->bridge.funcs = &rzg2l_mipi_dsi_bridge_ops;
dsi->bridge.of_node = dsi->dev->of_node;
/* Init host device */
@@ -771,10 +1026,15 @@ static int rzg2l_mipi_dsi_probe(struct platform_device *pdev)
if (ret < 0)
goto err_pm_disable;
+ dsi->dcs_buf_virt = dma_alloc_coherent(dsi->host.dev, RZG2L_DCS_BUF_SIZE,
+ &dsi->dcs_buf_phys, GFP_KERNEL);
+ if (!dsi->dcs_buf_virt)
+ return -ENOMEM;
+
return 0;
err_phy:
- rzg2l_mipi_dsi_dphy_exit(dsi);
+ dsi->info->dphy_exit(dsi);
pm_runtime_put(dsi->dev);
err_pm_disable:
pm_runtime_disable(dsi->dev);
@@ -785,12 +1045,23 @@ static void rzg2l_mipi_dsi_remove(struct platform_device *pdev)
{
struct rzg2l_mipi_dsi *dsi = platform_get_drvdata(pdev);
+ dma_free_coherent(dsi->host.dev, RZG2L_DCS_BUF_SIZE, dsi->dcs_buf_virt,
+ dsi->dcs_buf_phys);
mipi_dsi_host_unregister(&dsi->host);
pm_runtime_disable(&pdev->dev);
}
+static const struct rzg2l_mipi_dsi_hw_info rzg2l_mipi_dsi_info = {
+ .dphy_init = rzg2l_mipi_dsi_dphy_init,
+ .dphy_exit = rzg2l_mipi_dsi_dphy_exit,
+ .dphy_conf_clks = rzg2l_dphy_conf_clks,
+ .link_reg_offset = 0x10000,
+ .min_dclk = 5803,
+ .max_dclk = 148500,
+};
+
static const struct of_device_id rzg2l_mipi_dsi_of_table[] = {
- { .compatible = "renesas,rzg2l-mipi-dsi" },
+ { .compatible = "renesas,rzg2l-mipi-dsi", .data = &rzg2l_mipi_dsi_info, },
{ /* sentinel */ }
};
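
For reference, rzg2l_dphy_conf_clks() above keeps the HS bit clock in millihertz and derives it from the documented relation vclk * bpp = hsclk(byte) * 8 * lanes, i.e. hsfreq(bit) = vclk * bpp / lanes. A minimal stand-alone sketch of the same arithmetic (illustrative only, not part of the patch; the helper name is made up):

#include <stdint.h>
#include <stdio.h>

/* hsfreq (HS bit clock) in millihertz: vclk * bpp * 1000 / lanes, rounded. */
static uint64_t hsfreq_millihz(unsigned long vclk_hz, unsigned int bpp,
			       unsigned int lanes)
{
	uint64_t num = (uint64_t)vclk_hz * bpp * 1000ULL;

	return (num + lanes / 2) / lanes;
}

int main(void)
{
	/* 148.5 MHz pixel clock, 24 bpp, 4 lanes -> 891 Mbps per lane. */
	printf("%llu mHz\n",
	       (unsigned long long)hsfreq_millihz(148500000UL, 24, 4));
	return 0;
}
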
diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
index 1dbc16ec64a4..d8082a87d874 100644
--- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
+++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi_regs.h
@@ -41,8 +41,6 @@
#define DSIDPHYTIM3_THS_ZERO(x) ((x) << 0)
/* --------------------------------------------------------*/
-/* Link Registers */
-#define LINK_REG_OFFSET 0x10000
/* Link Status Register */
#define LINKSR 0x10
@@ -81,6 +79,20 @@
#define RSTSR_SWRSTLP (1 << 1)
#define RSTSR_SWRSTHS (1 << 0)
+/* DSI Set Register */
+#define DSISETR 0x120
+#define DSISETR_MRPSZ GENMASK(15, 0)
+
+/* Rx Result Save Slot 0 Register */
+#define RXRSS0R 0x240
+#define RXRSS0R_RXPKTDFAIL BIT(28)
+#define RXRSS0R_RXFAIL BIT(27)
+#define RXRSS0R_RXSUC BIT(25)
+#define RXRSS0R_DT GENMASK(21, 16)
+#define RXRSS0R_DATA1 GENMASK(15, 8)
+#define RXRSS0R_DATA0 GENMASK(7, 0)
+#define RXRSS0R_WC GENMASK(15, 0) /* Word count for long packet. */
+
/* Clock Lane Stop Time Set Register */
#define CLSTPTSETR 0x314
#define CLSTPTSETR_CLKKPT(x) ((x) << 24)
@@ -148,4 +160,44 @@
#define VICH1HPSETR_HFP(x) (((x) & 0x1fff) << 16)
#define VICH1HPSETR_HBP(x) (((x) & 0x1fff) << 0)
+/* Sequence Channel 0 Set 0 Register */
+#define SQCH0SET0R 0x5c0
+#define SQCH0SET0R_START BIT(0)
+
+/* Sequence Channel 0 Status Register */
+#define SQCH0SR 0x5d0
+#define SQCH0SR_ADESFIN BIT(8)
+
+/* Sequence Channel 0 Status Clear Register */
+#define SQCH0SCR 0x5d4
+#define SQCH0SCR_ADESFIN BIT(8)
+
+/* Sequence Channel 0 Descriptor 0-A Register */
+#define SQCH0DSC0AR 0x780
+#define SQCH0DSC0AR_NXACT_TERM 0 /* Bit 28 */
+#define SQCH0DSC0AR_BTA GENMASK(27, 26)
+#define SQCH0DSC0AR_BTA_NONE 0
+#define SQCH0DSC0AR_BTA_NON_READ 1
+#define SQCH0DSC0AR_BTA_READ 2
+#define SQCH0DSC0AR_BTA_ONLY 3
+#define SQCH0DSC0AR_SPD_HIGH 0
+#define SQCH0DSC0AR_SPD_LOW BIT(25)
+#define SQCH0DSC0AR_FMT_SHORT 0
+#define SQCH0DSC0AR_FMT_LONG BIT(24)
+#define SQCH0DSC0AR_DT GENMASK(21, 16)
+#define SQCH0DSC0AR_DATA1 GENMASK(15, 8)
+#define SQCH0DSC0AR_DATA0 GENMASK(7, 0)
+
+/* Sequence Channel 0 Descriptor 0-B Register */
+#define SQCH0DSC0BR 0x784
+#define SQCH0DSC0BR_DTSEL_MEM_SPACE BIT(24) /* Use external memory */
+
+/* Sequence Channel 0 Descriptor 0-C Register */
+#define SQCH0DSC0CR 0x788
+#define SQCH0DSC0CR_FINACT BIT(0)
+#define SQCH0DSC0CR_AUXOP BIT(22)
+
+/* Sequence Channel 0 Descriptor 0-D Register */
+#define SQCH0DSC0DR 0x78c
+
#endif /* __RZG2L_MIPI_DSI_REGS_H__ */
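
The RXRSS0R fields added above are extracted in rzg2l_mipi_dsi_read_response() with FIELD_GET(). A small stand-alone sketch of the equivalent mask/shift extraction (illustrative only, not part of the patch; the register value is made up):

#include <stdint.h>
#include <stdio.h>

#define RXRSS0R_DT_MASK		0x003f0000u	/* GENMASK(21, 16) */
#define RXRSS0R_DATA1_MASK	0x0000ff00u	/* GENMASK(15, 8) */
#define RXRSS0R_DATA0_MASK	0x000000ffu	/* GENMASK(7, 0) */

/* Poor man's FIELD_GET(): mask the value, then divide by the mask's lowest
 * set bit, which shifts the field down to bit 0.
 */
static unsigned int field_get(uint32_t mask, uint32_t val)
{
	return (val & mask) / (mask & -mask);
}

int main(void)
{
	uint32_t result = 0x021c1234;	/* made-up RXRSS0R readback */

	printf("dt=0x%02x data1=0x%02x data0=0x%02x\n",
	       field_get(RXRSS0R_DT_MASK, result),
	       field_get(RXRSS0R_DATA1_MASK, result),
	       field_get(RXRSS0R_DATA0_MASK, result));
	return 0;
}
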
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index dcc1f07632c3..5829ee061c61 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -52,16 +52,9 @@ rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
}
if (drm_is_afbc(mode_cmd->modifier[0])) {
- int ret, i;
-
ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
if (ret) {
- struct drm_gem_object **obj = afbc_fb->base.obj;
-
- for (i = 0; i < info->num_planes; ++i)
- drm_gem_object_put(obj[i]);
-
- kfree(afbc_fb);
+ drm_framebuffer_put(&afbc_fb->base);
return ERR_PTR(ret);
}
}
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index f56e77e7f6d0..261713dd7d5a 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -32,78 +32,123 @@
#define TRACE_SYSTEM gpu_scheduler
#define TRACE_INCLUDE_FILE gpu_scheduler_trace
+/**
+ * DOC: uAPI trace events
+ *
+ * ``drm_sched_job_queue``, ``drm_sched_job_run``, ``drm_sched_job_add_dep``,
+ * ``drm_sched_job_done`` and ``drm_sched_job_unschedulable`` are considered
+ * stable uAPI.
+ *
+ * Common trace event attributes:
+ *
+ * * ``dev`` - the dev_name() of the device running the job.
+ *
+ * * ``ring`` - the hardware ring running the job. Together with ``dev`` it
+ * uniquely identifies where the job is going to be executed.
+ *
+ * * ``fence`` - the &struct dma_fence.context and the &struct dma_fence.seqno of
+ * &struct drm_sched_fence.finished
+ *
+ * All the events depend on drm_sched_job_arm() having been called already for
+ * the job because they use &struct drm_sched_job.sched or
+ * &struct drm_sched_job.s_fence.
+ */
+
DECLARE_EVENT_CLASS(drm_sched_job,
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity),
TP_STRUCT__entry(
- __field(struct drm_sched_entity *, entity)
- __field(struct dma_fence *, fence)
__string(name, sched_job->sched->name)
- __field(uint64_t, id)
__field(u32, job_count)
__field(int, hw_job_count)
+ __string(dev, dev_name(sched_job->sched->dev))
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
+ __field(u64, client_id)
),
TP_fast_assign(
- __entry->entity = entity;
- __entry->id = sched_job->id;
- __entry->fence = &sched_job->s_fence->finished;
__assign_str(name);
__entry->job_count = spsc_queue_count(&entity->job_queue);
__entry->hw_job_count = atomic_read(
&sched_job->sched->credit_count);
+ __assign_str(dev);
+ __entry->fence_context = sched_job->s_fence->finished.context;
+ __entry->fence_seqno = sched_job->s_fence->finished.seqno;
+ __entry->client_id = sched_job->s_fence->drm_client_id;
),
- TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
- __entry->entity, __entry->id,
- __entry->fence, __get_str(name),
- __entry->job_count, __entry->hw_job_count)
+ TP_printk("dev=%s, fence=%llu:%llu, ring=%s, job count:%u, hw job count:%d, client_id:%llu",
+ __get_str(dev),
+ __entry->fence_context, __entry->fence_seqno, __get_str(name),
+ __entry->job_count, __entry->hw_job_count, __entry->client_id)
);
-DEFINE_EVENT(drm_sched_job, drm_sched_job,
+DEFINE_EVENT(drm_sched_job, drm_sched_job_queue,
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity)
);
-DEFINE_EVENT(drm_sched_job, drm_run_job,
+DEFINE_EVENT(drm_sched_job, drm_sched_job_run,
TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
TP_ARGS(sched_job, entity)
);
-TRACE_EVENT(drm_sched_process_job,
+TRACE_EVENT(drm_sched_job_done,
TP_PROTO(struct drm_sched_fence *fence),
TP_ARGS(fence),
TP_STRUCT__entry(
- __field(struct dma_fence *, fence)
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
),
TP_fast_assign(
- __entry->fence = &fence->finished;
+ __entry->fence_context = fence->finished.context;
+ __entry->fence_seqno = fence->finished.seqno;
),
- TP_printk("fence=%p signaled", __entry->fence)
+ TP_printk("fence=%llu:%llu signaled",
+ __entry->fence_context, __entry->fence_seqno)
);
-TRACE_EVENT(drm_sched_job_wait_dep,
+TRACE_EVENT(drm_sched_job_add_dep,
+ TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
+ TP_ARGS(sched_job, fence),
+ TP_STRUCT__entry(
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
+ __field(u64, ctx)
+ __field(u64, seqno)
+ ),
+
+ TP_fast_assign(
+ __entry->fence_context = sched_job->s_fence->finished.context;
+ __entry->fence_seqno = sched_job->s_fence->finished.seqno;
+ __entry->ctx = fence->context;
+ __entry->seqno = fence->seqno;
+ ),
+ TP_printk("fence=%llu:%llu depends on fence=%llu:%llu",
+ __entry->fence_context, __entry->fence_seqno,
+ __entry->ctx, __entry->seqno)
+);
+
+TRACE_EVENT(drm_sched_job_unschedulable,
TP_PROTO(struct drm_sched_job *sched_job, struct dma_fence *fence),
TP_ARGS(sched_job, fence),
TP_STRUCT__entry(
- __string(name, sched_job->sched->name)
- __field(uint64_t, id)
- __field(struct dma_fence *, fence)
- __field(uint64_t, ctx)
- __field(unsigned, seqno)
+ __field(u64, fence_context)
+ __field(u64, fence_seqno)
+ __field(u64, ctx)
+ __field(u64, seqno)
),
TP_fast_assign(
- __assign_str(name);
- __entry->id = sched_job->id;
- __entry->fence = fence;
+ __entry->fence_context = sched_job->s_fence->finished.context;
+ __entry->fence_seqno = sched_job->s_fence->finished.seqno;
__entry->ctx = fence->context;
__entry->seqno = fence->seqno;
),
- TP_printk("job ring=%s, id=%llu, depends fence=%p, context=%llu, seq=%u",
- __get_str(name), __entry->id,
- __entry->fence, __entry->ctx,
- __entry->seqno)
+ TP_printk("fence=%llu:%llu depends on unsignalled fence=%llu:%llu",
+ __entry->fence_context, __entry->fence_seqno,
+ __entry->ctx, __entry->seqno)
);
#endif /* _GPU_SCHED_TRACE_H_ */
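
Since these events are declared stable uAPI, the "fence=<context>:<seqno>" pair in their payload is what external tools are expected to key on. A tiny stand-alone sketch of parsing that pair out of an event payload (illustrative only; the sample line and its values are made up, and a real consumer would read the event from tracefs):

#include <inttypes.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Payload formatted as in the drm_sched_job TP_printk() above. */
	const char *payload =
		"dev=0000:65:00.0, fence=123:457, ring=gfx, job count:1, hw job count:0, client_id:42";
	uint64_t ctx, seqno;
	const char *p = strstr(payload, "fence=");

	if (p && sscanf(p, "fence=%" SCNu64 ":%" SCNu64, &ctx, &seqno) == 2)
		printf("fence context %" PRIu64 ", seqno %" PRIu64 "\n",
		       ctx, seqno);
	return 0;
}
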
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index e671aa241720..5635b3a826d8 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -21,7 +21,7 @@
*
*/
-#include <linux/kthread.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/completion.h>
@@ -478,10 +478,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
while ((entity->dependency =
drm_sched_job_dependency(sched_job, entity))) {
- trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
-
- if (drm_sched_entity_add_dependency_cb(entity))
+ if (drm_sched_entity_add_dependency_cb(entity)) {
+ trace_drm_sched_job_unschedulable(sched_job, entity->dependency);
return NULL;
+ }
}
/* skip jobs from entity that marked guilty */
@@ -546,10 +546,10 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
return;
/*
- * Only when the queue is empty are we guaranteed that the scheduler
- * thread cannot change ->last_scheduled. To enforce ordering we need
- * a read barrier here. See drm_sched_entity_pop_job() for the other
- * side.
+ * Only when the queue is empty are we guaranteed that
+ * drm_sched_run_job_work() cannot change entity->last_scheduled. To
+ * enforce ordering we need a read barrier here. See
+ * drm_sched_entity_pop_job() for the other side.
*/
smp_rmb();
@@ -587,7 +587,15 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
bool first;
ktime_t submit_ts;
- trace_drm_sched_job(sched_job, entity);
+ trace_drm_sched_job_queue(sched_job, entity);
+
+ if (trace_drm_sched_job_add_dep_enabled()) {
+ struct dma_fence *entry;
+ unsigned long index;
+
+ xa_for_each(&sched_job->dependencies, index, entry)
+ trace_drm_sched_job_add_dep(sched_job, entry);
+ }
atomic_inc(entity->rq->sched->score);
WRITE_ONCE(entity->last_user, current->group_leader);
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index e971528504a5..9391d6f0dc01 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -21,7 +21,7 @@
*
*/
-#include <linux/kthread.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -206,7 +206,8 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
EXPORT_SYMBOL(to_drm_sched_fence);
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
- void *owner)
+ void *owner,
+ u64 drm_client_id)
{
struct drm_sched_fence *fence = NULL;
@@ -215,6 +216,7 @@ struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
return NULL;
fence->owner = owner;
+ fence->drm_client_id = drm_client_id;
spin_lock_init(&fence->lock);
return fence;
diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
index 599cf6e1bb74..7ea5a6736f98 100644
--- a/drivers/gpu/drm/scheduler/sched_internal.h
+++ b/drivers/gpu/drm/scheduler/sched_internal.h
@@ -24,7 +24,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *s_entity,
- void *owner);
+ void *owner, u64 drm_client_id);
void drm_sched_fence_init(struct drm_sched_fence *fence,
struct drm_sched_entity *entity);
void drm_sched_fence_free(struct drm_sched_fence *fence);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 829579c41c6b..c63543132f9d 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -66,6 +66,7 @@
* This implies waiting for previously executed jobs.
*/
+#include <linux/export.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
@@ -391,7 +392,7 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
*
- * Finish the job's fence and wake up the worker thread.
+ * Finish the job's fence and resubmit the work items.
*/
static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
{
@@ -401,7 +402,7 @@ static void drm_sched_job_done(struct drm_sched_job *s_job, int result)
atomic_sub(s_job->credits, &sched->credit_count);
atomic_dec(sched->score);
- trace_drm_sched_process_job(s_fence);
+ trace_drm_sched_job_done(s_fence);
dma_fence_get(&s_fence->finished);
drm_sched_fence_finished(s_fence, result);
@@ -551,9 +552,10 @@ static void drm_sched_job_timedout(struct work_struct *work)
if (job) {
/*
- * Remove the bad job so it cannot be freed by concurrent
- * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread
- * is parked at which point it's safe.
+ * Remove the bad job so it cannot be freed by a concurrent
+ * &struct drm_sched_backend_ops.free_job. It will be
+ * reinserted after the scheduler's work items have been
+ * cancelled, at which point it's safe.
*/
list_del_init(&job->list);
spin_unlock(&sched->job_list_lock);
@@ -599,10 +601,10 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
/*
* Reinsert back the bad job here - now it's safe as
- * drm_sched_get_finished_job cannot race against us and release the
+ * drm_sched_get_finished_job() cannot race against us and release the
* bad job at this point - we parked (waited for) any in progress
- * (earlier) cleanups and drm_sched_get_finished_job will not be called
- * now until the scheduler thread is unparked.
+ * (earlier) cleanups and drm_sched_get_finished_job() will not be
+ * called now until the scheduler's work items are submitted again.
*/
if (bad && bad->sched == sched)
/*
@@ -615,7 +617,8 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
* Iterate the job list from later to earlier one and either deactivate
* their HW callbacks or remove them from pending list if they already
* signaled.
- * This iteration is thread safe as sched thread is stopped.
+ * This iteration is thread safe as the scheduler's work items have been
+ * cancelled.
*/
list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list,
list) {
@@ -680,9 +683,9 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, int errno)
struct drm_sched_job *s_job, *tmp;
/*
- * Locking the list is not required here as the sched thread is parked
- * so no new jobs are being inserted or removed. Also concurrent
- * GPU recovers can't run in parallel.
+ * Locking the list is not required here as the scheduler's work items
+ * are currently not running, so no new jobs are being inserted or
+ * removed. Also concurrent GPU recovers can't run in parallel.
*/
list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) {
struct dma_fence *fence = s_job->s_fence->parent;
@@ -764,6 +767,8 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
* @credits: the number of credits this job contributes to the schedulers
* credit limit
* @owner: job owner for debugging
+ * @drm_client_id: &struct drm_file.client_id of the owner (used by trace
+ * events)
*
* Refer to drm_sched_entity_push_job() documentation
* for locking considerations.
@@ -784,7 +789,8 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs);
*/
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
- u32 credits, void *owner)
+ u32 credits, void *owner,
+ uint64_t drm_client_id)
{
if (!entity->rq) {
/* This will most likely be followed by missing frames
@@ -810,7 +816,7 @@ int drm_sched_job_init(struct drm_sched_job *job,
job->entity = entity;
job->credits = credits;
- job->s_fence = drm_sched_fence_alloc(entity, owner);
+ job->s_fence = drm_sched_fence_alloc(entity, owner, drm_client_id);
if (!job->s_fence)
return -ENOMEM;
@@ -850,7 +856,6 @@ void drm_sched_job_arm(struct drm_sched_job *job)
job->sched = sched;
job->s_priority = entity->priority;
- job->id = atomic64_inc_return(&sched->job_id_count);
drm_sched_fence_init(job->s_fence, job->entity);
}
@@ -1229,7 +1234,7 @@ static void drm_sched_run_job_work(struct work_struct *w)
atomic_add(sched_job->credits, &sched->credit_count);
drm_sched_job_begin(sched_job);
- trace_drm_run_job(sched_job, entity);
+ trace_drm_sched_job_run(sched_job, entity);
/*
* The run_job() callback must by definition return a fence whose
* refcount has been incremented for the scheduler already.
diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
index f999c8859cf7..7f947ab9d322 100644
--- a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
+++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c
@@ -64,7 +64,7 @@ static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job)
job->flags |= DRM_MOCK_SCHED_JOB_DONE;
list_move_tail(&job->link, &sched->done_list);
- dma_fence_signal(&job->hw_fence);
+ dma_fence_signal_locked(&job->hw_fence);
complete(&job->done);
}
@@ -117,13 +117,13 @@ drm_mock_sched_job_new(struct kunit *test,
ret = drm_sched_job_init(&job->base,
&entity->base,
1,
- NULL);
+ NULL,
+ 1);
KUNIT_ASSERT_EQ(test, ret, 0);
job->test = test;
init_completion(&job->done);
- spin_lock_init(&job->lock);
INIT_LIST_HEAD(&job->link);
hrtimer_setup(&job->timer, drm_mock_sched_job_signal_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
@@ -169,7 +169,7 @@ static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job)
dma_fence_init(&job->hw_fence,
&drm_mock_sched_hw_fence_ops,
- &job->lock,
+ &sched->lock,
sched->hw_timeline.context,
atomic_inc_return(&sched->hw_timeline.next_seqno));
diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h
index 27caf8285fb7..fbba38137f0c 100644
--- a/drivers/gpu/drm/scheduler/tests/sched_tests.h
+++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h
@@ -106,7 +106,6 @@ struct drm_mock_sched_job {
unsigned int duration_us;
ktime_t finish_at;
- spinlock_t lock;
struct dma_fence hw_fence;
struct kunit *test;
diff --git a/drivers/gpu/drm/sitronix/Kconfig b/drivers/gpu/drm/sitronix/Kconfig
index c069d0d41775..6de7d92d9b74 100644
--- a/drivers/gpu/drm/sitronix/Kconfig
+++ b/drivers/gpu/drm/sitronix/Kconfig
@@ -5,15 +5,12 @@ config DRM_ST7571_I2C
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
select REGMAP_I2C
+ select VIDEOMODE_HELPERS
help
DRM driver for Sitronix ST7571 panels controlled over I2C.
if M is selected the module will be called st7571-i2c.
-config TINYDRM_ST7586
- tristate
- default n
-
config DRM_ST7586
tristate "DRM support for Sitronix ST7586 display panels"
depends on DRM && SPI
@@ -21,17 +18,12 @@ config DRM_ST7586
select DRM_KMS_HELPER
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
- default TINYDRM_ST7586
help
DRM driver for the following Sitronix ST7586 panels:
* LEGO MINDSTORMS EV3
If M is selected the module will be called st7586.
-config TINYDRM_ST7735R
- tristate
- default n
-
config DRM_ST7735R
tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
depends on DRM && SPI
@@ -40,7 +32,6 @@ config DRM_ST7735R
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
- default TINYDRM_ST7735R
help
DRM driver for Sitronix ST7715R/ST7735R with one of the following
LCDs:
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 74a1eef4674e..7484d3c3f4ed 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -97,7 +97,7 @@ struct sti_dvo {
struct dvo_config *config;
bool enabled;
struct drm_encoder *encoder;
- struct drm_bridge *bridge;
+ struct drm_bridge bridge;
};
struct sti_dvo_connector {
@@ -439,7 +439,6 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
struct drm_encoder *encoder;
struct sti_dvo_connector *connector;
struct drm_connector *drm_connector;
- struct drm_bridge *bridge;
int err;
/* Set the drm device handle */
@@ -455,20 +454,14 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
connector->dvo = dvo;
- bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
- if (!bridge)
- return -ENOMEM;
-
- bridge->driver_private = dvo;
- bridge->funcs = &sti_dvo_bridge_funcs;
- bridge->of_node = dvo->dev.of_node;
- drm_bridge_add(bridge);
+ dvo->bridge.driver_private = dvo;
+ dvo->bridge.of_node = dvo->dev.of_node;
+ drm_bridge_add(&dvo->bridge);
- err = drm_bridge_attach(encoder, bridge, NULL, 0);
+ err = drm_bridge_attach(encoder, &dvo->bridge, NULL, 0);
if (err)
return err;
- dvo->bridge = bridge;
connector->encoder = encoder;
dvo->encoder = encoder;
@@ -490,7 +483,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
return 0;
err_sysfs:
- drm_bridge_remove(bridge);
+ drm_bridge_remove(&dvo->bridge);
return -EINVAL;
}
@@ -499,7 +492,7 @@ static void sti_dvo_unbind(struct device *dev,
{
struct sti_dvo *dvo = dev_get_drvdata(dev);
- drm_bridge_remove(dvo->bridge);
+ drm_bridge_remove(&dvo->bridge);
}
static const struct component_ops sti_dvo_ops = {
@@ -515,10 +508,10 @@ static int sti_dvo_probe(struct platform_device *pdev)
DRM_INFO("%s\n", __func__);
- dvo = devm_kzalloc(dev, sizeof(*dvo), GFP_KERNEL);
- if (!dvo) {
- DRM_ERROR("Failed to allocate memory for DVO\n");
- return -ENOMEM;
+ dvo = devm_drm_bridge_alloc(dev, struct sti_dvo, bridge, &sti_dvo_bridge_funcs);
+ if (IS_ERR(dvo)) {
+ DRM_ERROR("Failed to allocate DVO\n");
+ return PTR_ERR(dvo);
}
dvo->dev = pdev->dev;
diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c
index a3ae9a93ce66..07788e8d3d83 100644
--- a/drivers/gpu/drm/stm/lvds.c
+++ b/drivers/gpu/drm/stm/lvds.c
@@ -1049,9 +1049,9 @@ static int lvds_probe(struct platform_device *pdev)
dev_dbg(dev, "Probing LVDS driver...\n");
- lvds = devm_kzalloc(dev, sizeof(*lvds), GFP_KERNEL);
- if (!lvds)
- return -ENOMEM;
+ lvds = devm_drm_bridge_alloc(dev, struct stm_lvds, lvds_bridge, &lvds_bridge_funcs);
+ if (IS_ERR(lvds))
+ return PTR_ERR(lvds);
lvds->dev = dev;
@@ -1164,7 +1164,6 @@ static int lvds_probe(struct platform_device *pdev)
goto err_lvds_probe;
}
- lvds->lvds_bridge.funcs = &lvds_bridge_funcs;
lvds->lvds_bridge.of_node = dev->of_node;
lvds->hw_version = lvds_read(lvds, LVDS_VERR);
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c
index 58480d8e4f70..c100d29b1a89 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.c
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.c
@@ -212,7 +212,7 @@ void sun8i_csc_set_ccsc_coefficients(struct sun8i_mixer *mixer, int layer,
{
u32 base;
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3) {
sun8i_de3_ccsc_set_coefficients(mixer->engine.regs, layer,
mode, encoding, range);
return;
@@ -228,7 +228,7 @@ void sun8i_csc_enable_ccsc(struct sun8i_mixer *mixer, int layer, bool enable)
{
u32 base;
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3) {
sun8i_de3_ccsc_enable(mixer->engine.regs, layer, enable);
return;
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 8b41d33baa30..31a8409b98f4 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -274,6 +274,7 @@ static void sun8i_mixer_commit(struct sunxi_engine *engine,
{
struct sun8i_mixer *mixer = engine_to_sun8i_mixer(engine);
u32 bld_base = sun8i_blender_base(mixer);
+ struct regmap *bld_regs = sun8i_blender_regmap(mixer);
struct drm_plane_state *plane_state;
struct drm_plane *plane;
u32 route = 0, pipe_en = 0;
@@ -313,12 +314,13 @@ static void sun8i_mixer_commit(struct sunxi_engine *engine,
pipe_en |= SUN8I_MIXER_BLEND_PIPE_CTL_EN(zpos);
}
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ROUTE(bld_base), route);
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
+ regmap_write(bld_regs, SUN8I_MIXER_BLEND_ROUTE(bld_base), route);
+ regmap_write(bld_regs, SUN8I_MIXER_BLEND_PIPE_CTL(bld_base),
pipe_en | SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
- regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_DBUFF,
- SUN8I_MIXER_GLOBAL_DBUFF_ENABLE);
+ if (mixer->cfg->de_type != SUN8I_MIXER_DE33)
+ regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_DBUFF,
+ SUN8I_MIXER_GLOBAL_DBUFF_ENABLE);
}
static struct drm_plane **sun8i_layers_init(struct drm_device *drm,
@@ -367,25 +369,31 @@ static void sun8i_mixer_mode_set(struct sunxi_engine *engine,
const struct drm_display_mode *mode)
{
struct sun8i_mixer *mixer = engine_to_sun8i_mixer(engine);
+ struct regmap *bld_regs;
u32 bld_base, size, val;
bool interlaced;
bld_base = sun8i_blender_base(mixer);
+ bld_regs = sun8i_blender_regmap(mixer);
interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
size = SUN8I_MIXER_SIZE(mode->hdisplay, mode->vdisplay);
DRM_DEBUG_DRIVER("Updating global size W: %u H: %u\n",
mode->hdisplay, mode->vdisplay);
- regmap_write(engine->regs, SUN8I_MIXER_GLOBAL_SIZE, size);
- regmap_write(engine->regs, SUN8I_MIXER_BLEND_OUTSIZE(bld_base), size);
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ regmap_write(mixer->top_regs, SUN50I_MIXER_GLOBAL_SIZE, size);
+ else
+ regmap_write(mixer->engine.regs, SUN8I_MIXER_GLOBAL_SIZE, size);
+
+ regmap_write(bld_regs, SUN8I_MIXER_BLEND_OUTSIZE(bld_base), size);
if (interlaced)
val = SUN8I_MIXER_BLEND_OUTCTL_INTERLACED;
else
val = 0;
- regmap_update_bits(engine->regs, SUN8I_MIXER_BLEND_OUTCTL(bld_base),
+ regmap_update_bits(bld_regs, SUN8I_MIXER_BLEND_OUTCTL(bld_base),
SUN8I_MIXER_BLEND_OUTCTL_INTERLACED, val);
DRM_DEBUG_DRIVER("Switching display mixer interlaced mode %s\n",
@@ -399,12 +407,29 @@ static const struct sunxi_engine_ops sun8i_engine_ops = {
};
static const struct regmap_config sun8i_mixer_regmap_config = {
+ .name = "layers",
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
.max_register = 0xffffc, /* guessed */
};
+static const struct regmap_config sun8i_top_regmap_config = {
+ .name = "top",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x3c,
+};
+
+static const struct regmap_config sun8i_disp_regmap_config = {
+ .name = "display",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x20000,
+};
+
static int sun8i_mixer_of_get_id(struct device_node *node)
{
struct device_node *ep, *remote;
@@ -425,6 +450,50 @@ static int sun8i_mixer_of_get_id(struct device_node *node)
return of_ep.id;
}
+static void sun8i_mixer_init(struct sun8i_mixer *mixer)
+{
+ struct regmap *top_regs, *disp_regs;
+ unsigned int base = sun8i_blender_base(mixer);
+ int plane_cnt, i;
+
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33) {
+ top_regs = mixer->top_regs;
+ disp_regs = mixer->disp_regs;
+ } else {
+ top_regs = mixer->engine.regs;
+ disp_regs = mixer->engine.regs;
+ }
+
+ /* Enable the mixer */
+ regmap_write(top_regs, SUN8I_MIXER_GLOBAL_CTL,
+ SUN8I_MIXER_GLOBAL_CTL_RT_EN);
+
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ regmap_write(top_regs, SUN50I_MIXER_GLOBAL_CLK, 1);
+
+ /* Set background color to black */
+ regmap_write(disp_regs, SUN8I_MIXER_BLEND_BKCOLOR(base),
+ SUN8I_MIXER_BLEND_COLOR_BLACK);
+
+ /*
+ * Set fill color of bottom plane to black. Generally not needed
+ * except when VI plane is at bottom (zpos = 0) and enabled.
+ */
+ regmap_write(disp_regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
+ SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
+ regmap_write(disp_regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, 0),
+ SUN8I_MIXER_BLEND_COLOR_BLACK);
+
+ plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
+ for (i = 0; i < plane_cnt; i++)
+ regmap_write(disp_regs,
+ SUN8I_MIXER_BLEND_MODE(base, i),
+ SUN8I_MIXER_BLEND_MODE_DEF);
+
+ regmap_update_bits(disp_regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
+ SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
+}
+
static int sun8i_mixer_bind(struct device *dev, struct device *master,
void *data)
{
@@ -433,8 +502,6 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
struct sun4i_drv *drv = drm->dev_private;
struct sun8i_mixer *mixer;
void __iomem *regs;
- unsigned int base;
- int plane_cnt;
int i, ret;
/*
@@ -495,6 +562,30 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
return PTR_ERR(mixer->engine.regs);
}
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33) {
+ regs = devm_platform_ioremap_resource_byname(pdev, "top");
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ mixer->top_regs = devm_regmap_init_mmio(dev, regs,
+ &sun8i_top_regmap_config);
+ if (IS_ERR(mixer->top_regs)) {
+ dev_err(dev, "Couldn't create the top regmap\n");
+ return PTR_ERR(mixer->top_regs);
+ }
+
+ regs = devm_platform_ioremap_resource_byname(pdev, "display");
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ mixer->disp_regs = devm_regmap_init_mmio(dev, regs,
+ &sun8i_disp_regmap_config);
+ if (IS_ERR(mixer->disp_regs)) {
+ dev_err(dev, "Couldn't create the disp regmap\n");
+ return PTR_ERR(mixer->disp_regs);
+ }
+ }
+
mixer->reset = devm_reset_control_get(dev, NULL);
if (IS_ERR(mixer->reset)) {
dev_err(dev, "Couldn't get our reset line\n");
@@ -534,10 +625,8 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
list_add_tail(&mixer->engine.list, &drv->engine_list);
- base = sun8i_blender_base(mixer);
-
/* Reset registers and disable unused sub-engines */
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3) {
for (i = 0; i < DE3_MIXER_UNIT_SIZE; i += 4)
regmap_write(mixer->engine.regs, i, 0);
@@ -551,7 +640,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
regmap_write(mixer->engine.regs, SUN50I_MIXER_FMT_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC0_EN, 0);
regmap_write(mixer->engine.regs, SUN50I_MIXER_CDC1_EN, 0);
- } else {
+ } else if (mixer->cfg->de_type == SUN8I_MIXER_DE2) {
for (i = 0; i < DE2_MIXER_UNIT_SIZE; i += 4)
regmap_write(mixer->engine.regs, i, 0);
@@ -564,31 +653,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
regmap_write(mixer->engine.regs, SUN8I_MIXER_DCSC_EN, 0);
}
- /* Enable the mixer */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_GLOBAL_CTL,
- SUN8I_MIXER_GLOBAL_CTL_RT_EN);
-
- /* Set background color to black */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_BKCOLOR(base),
- SUN8I_MIXER_BLEND_COLOR_BLACK);
-
- /*
- * Set fill color of bottom plane to black. Generally not needed
- * except when VI plane is at bottom (zpos = 0) and enabled.
- */
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
- SUN8I_MIXER_BLEND_PIPE_CTL_FC_EN(0));
- regmap_write(mixer->engine.regs, SUN8I_MIXER_BLEND_ATTR_FCOLOR(base, 0),
- SUN8I_MIXER_BLEND_COLOR_BLACK);
-
- plane_cnt = mixer->cfg->vi_num + mixer->cfg->ui_num;
- for (i = 0; i < plane_cnt; i++)
- regmap_write(mixer->engine.regs,
- SUN8I_MIXER_BLEND_MODE(base, i),
- SUN8I_MIXER_BLEND_MODE_DEF);
-
- regmap_update_bits(mixer->engine.regs, SUN8I_MIXER_BLEND_PIPE_CTL(base),
- SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK, 0);
+ sun8i_mixer_init(mixer);
return 0;
@@ -628,6 +693,7 @@ static void sun8i_mixer_remove(struct platform_device *pdev)
static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.scaler_mask = 0xf,
.scanline_yuv = 2048,
.ui_num = 3,
@@ -636,6 +702,7 @@ static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
.ui_num = 1,
@@ -644,6 +711,7 @@ static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = {
static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 432000000,
.scaler_mask = 0xf,
.scanline_yuv = 2048,
@@ -653,6 +721,7 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0xf,
.scanline_yuv = 2048,
@@ -662,6 +731,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
@@ -670,6 +740,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
};
static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
+ .de_type = SUN8I_MIXER_DE2,
.vi_num = 2,
.ui_num = 1,
.scaler_mask = 0x3,
@@ -680,6 +751,7 @@ static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
static const struct sun8i_mixer_cfg sun20i_d1_mixer0_cfg = {
.ccsc = CCSC_D1_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
@@ -689,6 +761,7 @@ static const struct sun8i_mixer_cfg sun20i_d1_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun20i_d1_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x1,
.scanline_yuv = 1024,
@@ -698,6 +771,7 @@ static const struct sun8i_mixer_cfg sun20i_d1_mixer1_cfg = {
static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0xf,
.scanline_yuv = 4096,
@@ -707,6 +781,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
.ccsc = CCSC_MIXER1_LAYOUT,
+ .de_type = SUN8I_MIXER_DE2,
.mod_rate = 297000000,
.scaler_mask = 0x3,
.scanline_yuv = 2048,
@@ -716,7 +791,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
.ccsc = CCSC_MIXER0_LAYOUT,
- .is_de3 = true,
+ .de_type = SUN8I_MIXER_DE3,
.mod_rate = 600000000,
.scaler_mask = 0xf,
.scanline_yuv = 4096,
@@ -724,6 +799,17 @@ static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
.vi_num = 1,
};
+static const struct sun8i_mixer_cfg sun50i_h616_mixer0_cfg = {
+ .ccsc = CCSC_MIXER0_LAYOUT,
+ .de_type = SUN8I_MIXER_DE33,
+ .mod_rate = 600000000,
+ .scaler_mask = 0xf,
+ .scanline_yuv = 4096,
+ .ui_num = 3,
+ .vi_num = 1,
+ .map = {0, 6, 7, 8},
+};
+
static const struct of_device_id sun8i_mixer_of_table[] = {
{
.compatible = "allwinner,sun8i-a83t-de2-mixer-0",
@@ -769,6 +855,10 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
.compatible = "allwinner,sun50i-h6-de3-mixer-0",
.data = &sun50i_h6_mixer0_cfg,
},
+ {
+ .compatible = "allwinner,sun50i-h616-de33-mixer-0",
+ .data = &sun50i_h616_mixer0_cfg,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sun8i_mixer_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index d7898c9c9cc0..a1c1cbccc654 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -21,6 +21,9 @@
#define SUN8I_MIXER_GLOBAL_DBUFF 0x8
#define SUN8I_MIXER_GLOBAL_SIZE 0xc
+#define SUN50I_MIXER_GLOBAL_SIZE 0x8
+#define SUN50I_MIXER_GLOBAL_CLK 0xc
+
#define SUN8I_MIXER_GLOBAL_CTL_RT_EN BIT(0)
#define SUN8I_MIXER_GLOBAL_DBUFF_ENABLE BIT(0)
@@ -151,6 +154,12 @@ enum {
CCSC_D1_MIXER0_LAYOUT,
};
+enum sun8i_mixer_type {
+ SUN8I_MIXER_DE2,
+ SUN8I_MIXER_DE3,
+ SUN8I_MIXER_DE33,
+};
+
/**
* struct sun8i_mixer_cfg - mixer HW configuration
* @vi_num: number of VI channels
@@ -162,8 +171,9 @@ enum {
* @ccsc: select set of CCSC base addresses from the enumeration above.
* @mod_rate: module clock rate that needs to be set in order to have
* a functional block.
- * @is_de3: true, if this is next gen display engine 3.0, false otherwise.
+ * @de_type: sun8i_mixer_type enum representing the display engine generation.
* @scanline_yuv: size of a scanline for VI scaler for YUV formats.
+ * @map: channel map for DE variants processing YUV separately (DE33).
*/
struct sun8i_mixer_cfg {
int vi_num;
@@ -171,8 +181,9 @@ struct sun8i_mixer_cfg {
int scaler_mask;
int ccsc;
unsigned long mod_rate;
- unsigned int is_de3 : 1;
+ unsigned int de_type;
unsigned int scanline_yuv;
+ unsigned int map[6];
};
struct sun8i_mixer {
@@ -184,6 +195,9 @@ struct sun8i_mixer {
struct clk *bus_clk;
struct clk *mod_clk;
+
+ struct regmap *top_regs;
+ struct regmap *disp_regs;
};
enum {
@@ -214,13 +228,22 @@ engine_to_sun8i_mixer(struct sunxi_engine *engine)
static inline u32
sun8i_blender_base(struct sun8i_mixer *mixer)
{
- return mixer->cfg->is_de3 ? DE3_BLD_BASE : DE2_BLD_BASE;
+ return mixer->cfg->de_type == SUN8I_MIXER_DE3 ? DE3_BLD_BASE : DE2_BLD_BASE;
+}
+
+static inline struct regmap *
+sun8i_blender_regmap(struct sun8i_mixer *mixer)
+{
+ return mixer->cfg->de_type == SUN8I_MIXER_DE33 ?
+ mixer->disp_regs : mixer->engine.regs;
}
static inline u32
sun8i_channel_base(struct sun8i_mixer *mixer, int channel)
{
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ return mixer->cfg->map[channel] * 0x20000 + DE2_CH_SIZE;
+ else if (mixer->cfg->de_type == SUN8I_MIXER_DE3)
return DE3_CH_BASE + channel * DE3_CH_SIZE;
else
return DE2_CH_BASE + channel * DE2_CH_SIZE;
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index b90e5edef4e8..f97be0040aab 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -23,6 +23,7 @@
#include "sun8i_mixer.h"
#include "sun8i_ui_layer.h"
#include "sun8i_ui_scaler.h"
+#include "sun8i_vi_scaler.h"
static void sun8i_ui_layer_update_alpha(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
@@ -51,6 +52,7 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
{
struct drm_plane_state *state = plane->state;
u32 src_w, src_h, dst_w, dst_h;
+ struct regmap *bld_regs;
u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
@@ -59,6 +61,7 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
channel, overlay);
bld_base = sun8i_blender_base(mixer);
+ bld_regs = sun8i_blender_regmap(mixer);
ch_base = sun8i_channel_base(mixer, channel);
src_w = drm_rect_width(&state->src) >> 16;
@@ -91,22 +94,34 @@ static int sun8i_ui_layer_update_coord(struct sun8i_mixer *mixer, int channel,
hscale = state->src_w / state->crtc_w;
vscale = state->src_h / state->crtc_h;
- sun8i_ui_scaler_setup(mixer, channel, src_w, src_h, dst_w,
- dst_h, hscale, vscale, hphase, vphase);
- sun8i_ui_scaler_enable(mixer, channel, true);
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33) {
+ sun8i_vi_scaler_setup(mixer, channel, src_w, src_h,
+ dst_w, dst_h, hscale, vscale,
+ hphase, vphase,
+ state->fb->format);
+ sun8i_vi_scaler_enable(mixer, channel, true);
+ } else {
+ sun8i_ui_scaler_setup(mixer, channel, src_w, src_h,
+ dst_w, dst_h, hscale, vscale,
+ hphase, vphase);
+ sun8i_ui_scaler_enable(mixer, channel, true);
+ }
} else {
DRM_DEBUG_DRIVER("HW scaling is not needed\n");
- sun8i_ui_scaler_enable(mixer, channel, false);
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ sun8i_vi_scaler_enable(mixer, channel, false);
+ else
+ sun8i_ui_scaler_enable(mixer, channel, false);
}
/* Set base coordinates */
DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
outsize);
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
index ae0806bccac7..8b7a58e27517 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_scaler.c
@@ -93,7 +93,7 @@ static u32 sun8i_ui_scaler_base(struct sun8i_mixer *mixer, int channel)
{
int vi_num = mixer->cfg->vi_num;
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE3)
return DE3_VI_SCALER_UNIT_BASE +
DE3_VI_SCALER_UNIT_SIZE * vi_num +
DE3_UI_SCALER_UNIT_SIZE * (channel - vi_num);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 9c09d9c08496..a09ee4097537 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -25,7 +25,7 @@ static void sun8i_vi_layer_update_alpha(struct sun8i_mixer *mixer, int channel,
ch_base = sun8i_channel_base(mixer, channel);
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
mask = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK |
SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MODE_MASK;
val = SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA
@@ -55,6 +55,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
struct drm_plane_state *state = plane->state;
const struct drm_format_info *format = state->fb->format;
u32 src_w, src_h, dst_w, dst_h;
+ struct regmap *bld_regs;
u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
@@ -66,6 +67,7 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
channel, overlay);
bld_base = sun8i_blender_base(mixer);
+ bld_regs = sun8i_blender_regmap(mixer);
ch_base = sun8i_channel_base(mixer, channel);
src_w = drm_rect_width(&state->src) >> 16;
@@ -183,10 +185,10 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
state->dst.x1, state->dst.y1);
DRM_DEBUG_DRIVER("Layer destination size W: %d H: %d\n", dst_w, dst_h);
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_COORD(bld_base, zpos),
SUN8I_MIXER_COORD(state->dst.x1, state->dst.y1));
- regmap_write(mixer->engine.regs,
+ regmap_write(bld_regs,
SUN8I_MIXER_BLEND_ATTR_INSIZE(bld_base, zpos),
outsize);
@@ -483,7 +485,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
if (!layer)
return ERR_PTR(-ENOMEM);
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
formats = sun8i_vi_layer_de3_formats;
format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats);
} else {
@@ -507,7 +509,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
plane_cnt = mixer->cfg->ui_num + mixer->cfg->vi_num;
- if (mixer->cfg->vi_num == 1 || mixer->cfg->is_de3) {
+ if (mixer->cfg->vi_num == 1 || mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
ret = drm_plane_create_alpha_property(&layer->plane);
if (ret) {
dev_err(drm->dev, "Couldn't add alpha property\n");
@@ -524,7 +526,7 @@ struct sun8i_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709);
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3)
supported_encodings |= BIT(DRM_COLOR_YCBCR_BT2020);
supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
index 7ba75011adf9..82df6244af88 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_scaler.c
@@ -835,7 +835,9 @@ static const u32 bicubic4coefftab32[480] = {
static u32 sun8i_vi_scaler_base(struct sun8i_mixer *mixer, int channel)
{
- if (mixer->cfg->is_de3)
+ if (mixer->cfg->de_type == SUN8I_MIXER_DE33)
+ return sun8i_channel_base(mixer, channel) + 0x3000;
+ else if (mixer->cfg->de_type == SUN8I_MIXER_DE3)
return DE3_VI_SCALER_UNIT_BASE +
DE3_VI_SCALER_UNIT_SIZE * channel;
else
@@ -956,7 +958,7 @@ void sun8i_vi_scaler_setup(struct sun8i_mixer *mixer, int layer,
cvphase = vphase;
}
- if (mixer->cfg->is_de3) {
+ if (mixer->cfg->de_type >= SUN8I_MIXER_DE3) {
u32 val;
if (format->hsub == 1 && format->vsub == 1)
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
index cb08a88242cc..1424b63dde99 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_helper.h
@@ -93,6 +93,10 @@ static inline struct drm_sysfb_device *to_drm_sysfb_device(struct drm_device *de
* Plane
*/
+size_t drm_sysfb_build_fourcc_list(struct drm_device *dev,
+ const u32 *native_fourccs, size_t native_nfourccs,
+ u32 *fourccs_out, size_t nfourccs_out);
+
int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state);
void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
index ffaa2522ab96..1bcdb5ee8f09 100644
--- a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
+++ b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c
@@ -47,6 +47,144 @@ EXPORT_SYMBOL(drm_sysfb_mode);
* Plane
*/
+static u32 to_nonalpha_fourcc(u32 fourcc)
+{
+ /* only handle formats with depth != 0 and alpha channel */
+ switch (fourcc) {
+ case DRM_FORMAT_ARGB1555:
+ return DRM_FORMAT_XRGB1555;
+ case DRM_FORMAT_ABGR1555:
+ return DRM_FORMAT_XBGR1555;
+ case DRM_FORMAT_RGBA5551:
+ return DRM_FORMAT_RGBX5551;
+ case DRM_FORMAT_BGRA5551:
+ return DRM_FORMAT_BGRX5551;
+ case DRM_FORMAT_ARGB8888:
+ return DRM_FORMAT_XRGB8888;
+ case DRM_FORMAT_ABGR8888:
+ return DRM_FORMAT_XBGR8888;
+ case DRM_FORMAT_RGBA8888:
+ return DRM_FORMAT_RGBX8888;
+ case DRM_FORMAT_BGRA8888:
+ return DRM_FORMAT_BGRX8888;
+ case DRM_FORMAT_ARGB2101010:
+ return DRM_FORMAT_XRGB2101010;
+ case DRM_FORMAT_ABGR2101010:
+ return DRM_FORMAT_XBGR2101010;
+ case DRM_FORMAT_RGBA1010102:
+ return DRM_FORMAT_RGBX1010102;
+ case DRM_FORMAT_BGRA1010102:
+ return DRM_FORMAT_BGRX1010102;
+ }
+
+ return fourcc;
+}
+
+static bool is_listed_fourcc(const u32 *fourccs, size_t nfourccs, u32 fourcc)
+{
+ const u32 *fourccs_end = fourccs + nfourccs;
+
+ while (fourccs < fourccs_end) {
+ if (*fourccs == fourcc)
+ return true;
+ ++fourccs;
+ }
+ return false;
+}
+
+/**
+ * drm_sysfb_build_fourcc_list - Filters a list of supported color formats against
+ * the device's native formats
+ * @dev: DRM device
+ * @native_fourccs: 4CC codes of natively supported color formats
+ * @native_nfourccs: The number of entries in @native_fourccs
+ * @fourccs_out: Returns 4CC codes of supported color formats
+ * @nfourccs_out: The number of available entries in @fourccs_out
+ *
+ * This function creates a list of supported color formats from natively
+ * supported formats and additional emulated formats.
+ * Most userspace programs expect at least XRGB8888 support on the
+ * primary plane. Sysfb devices that have to emulate
+ * the format should use drm_sysfb_build_fourcc_list() to create a list
+ * of supported color formats. The returned list can be handed over to
+ * drm_universal_plane_init() et al. Native formats will go before
+ * emulated formats. Native formats with an alpha channel will be replaced
+ * by equivalent formats without alpha channel, as primary planes usually
+ * don't support alpha. Other heuristics might be applied to optimize
+ * the sorting order. Formats near the beginning of the list are usually
+ * preferred over formats near the end of the list.
+ *
+ * Returns:
+ * The number of 4CC codes returned in @fourccs_out.
+ */
+size_t drm_sysfb_build_fourcc_list(struct drm_device *dev,
+ const u32 *native_fourccs, size_t native_nfourccs,
+ u32 *fourccs_out, size_t nfourccs_out)
+{
+ /*
+ * XRGB8888 is the default fallback format for most of userspace
+ * and it's currently the only format that should be emulated for
+ * the primary plane. If another default fallback is ever needed,
+ * it should be added here.
+ */
+ static const u32 extra_fourccs[] = {
+ DRM_FORMAT_XRGB8888,
+ };
+ static const size_t extra_nfourccs = ARRAY_SIZE(extra_fourccs);
+
+ u32 *fourccs = fourccs_out;
+ const u32 *fourccs_end = fourccs_out + nfourccs_out;
+ size_t i;
+
+ /*
+ * The device's native formats go first.
+ */
+
+ for (i = 0; i < native_nfourccs; ++i) {
+ /*
+ * Several DTs, boot loaders and firmware report native
+ * alpha formats that are really non-alpha formats. So
+ * replace such alpha formats with their non-alpha equivalents.
+ */
+ u32 fourcc = to_nonalpha_fourcc(native_fourccs[i]);
+
+ if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
+ continue; /* skip duplicate entries */
+ } else if (fourccs == fourccs_end) {
+ drm_warn(dev, "Ignoring native format %p4cc\n", &fourcc);
+ continue; /* end of available output buffer */
+ }
+
+ drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc);
+
+ *fourccs = fourcc;
+ ++fourccs;
+ }
+
+ /*
+ * The extra formats, emulated by the driver, go second.
+ */
+
+ for (i = 0; (i < extra_nfourccs) && (fourccs < fourccs_end); ++i) {
+ u32 fourcc = extra_fourccs[i];
+
+ if (is_listed_fourcc(fourccs_out, fourccs - fourccs_out, fourcc)) {
+ continue; /* skip duplicate and native entries */
+ } else if (fourccs == fourccs_end) {
+ drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc);
+ continue; /* end of available output buffer */
+ }
+
+ drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc);
+
+ *fourccs = fourcc;
+ ++fourccs;
+ }
+
+ return fourccs - fourccs_out;
+}
+EXPORT_SYMBOL(drm_sysfb_build_fourcc_list);
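For reference, the call sites converted later in this series (efidrm, ofdrm, simpledrm, vesadrm) all follow the same pattern. A minimal sketch of the intended usage is shown below; the function, struct and array names (example_*, the formats[] size) are illustrative only and not part of the patch.

/*
 * Hypothetical usage sketch of drm_sysfb_build_fourcc_list(); the pattern
 * mirrors the efidrm/simpledrm/vesadrm hunks further down in this diff.
 * example_primary_plane_funcs is assumed to be defined elsewhere.
 */
static int example_sysfb_init_primary_plane(struct drm_device *dev,
                                            struct drm_plane *primary_plane,
                                            const struct drm_format_info *native_format)
{
        u32 formats[8]; /* room for the native format plus emulated ones */
        size_t nformats;

        /* Native format goes first; XRGB8888 is appended if it has to be emulated. */
        nformats = drm_sysfb_build_fourcc_list(dev, &native_format->format, 1,
                                               formats, ARRAY_SIZE(formats));

        return drm_universal_plane_init(dev, primary_plane, 0,
                                        &example_primary_plane_funcs,
                                        formats, nformats, NULL,
                                        DRM_PLANE_TYPE_PRIMARY, NULL);
}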
+
int drm_sysfb_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state)
{
diff --git a/drivers/gpu/drm/sysfb/efidrm.c b/drivers/gpu/drm/sysfb/efidrm.c
index 46912924636a..1883c4a8604c 100644
--- a/drivers/gpu/drm/sysfb/efidrm.c
+++ b/drivers/gpu/drm/sysfb/efidrm.c
@@ -202,7 +202,7 @@ static struct efidrm_device *efidrm_device_create(struct drm_driver *drv,
drm_dbg(dev, "framebuffer format=%p4cc, size=%dx%d, stride=%d bytes\n",
&format->format, width, height, stride);
-#ifdef CONFIG_X86
+#if defined(CONFIG_FIRMWARE_EDID)
if (drm_edid_header_is_valid(edid_info.dummy) == 8)
sysfb->edid = edid_info.dummy;
#endif
@@ -271,8 +271,8 @@ static struct efidrm_device *efidrm_device_create(struct drm_driver *drv,
/* Primary plane */
- nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- efi->formats, ARRAY_SIZE(efi->formats));
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ efi->formats, ARRAY_SIZE(efi->formats));
primary_plane = &efi->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &efidrm_primary_plane_funcs,
diff --git a/drivers/gpu/drm/sysfb/ofdrm.c b/drivers/gpu/drm/sysfb/ofdrm.c
index fddfe8bea9f7..8d8ab39c5f36 100644
--- a/drivers/gpu/drm/sysfb/ofdrm.c
+++ b/drivers/gpu/drm/sysfb/ofdrm.c
@@ -8,13 +8,13 @@
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fbdev_shmem.h>
-#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -644,36 +644,36 @@ static void ofdrm_qemu_cmap_write(struct ofdrm_device *odev, unsigned char index
writeb(b, data);
}
-static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev,
- const struct drm_format_info *format)
+static void ofdrm_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ struct drm_device *dev = crtc->dev;
+ struct ofdrm_device *odev = ofdrm_device_of_dev(dev);
+ u8 i8 = index & 0xff;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ odev->funcs->cmap_write(odev, i8, r8, g8, b8);
+}
+
+static void ofdrm_device_fill_gamma(struct ofdrm_device *odev,
+ const struct drm_format_info *format)
{
struct drm_device *dev = &odev->sysfb.dev;
- int i;
+ struct drm_crtc *crtc = &odev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
- /* Use better interpolation, to take 32 values from 0 to 255 */
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE / 8; i++) {
- unsigned char r = i * 8 + i / 4;
- unsigned char g = i * 4 + i / 16;
- unsigned char b = i * 8 + i / 4;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = OFDRM_GAMMA_LUT_SIZE / 8; i < OFDRM_GAMMA_LUT_SIZE / 4; i++) {
- unsigned char r = 0;
- unsigned char g = i * 4 + i / 16;
- unsigned char b = 0;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
+ drm_crtc_fill_gamma_565(crtc, ofdrm_set_gamma_lut);
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE; i++)
- odev->funcs->cmap_write(odev, i, i, i, i);
+ drm_crtc_fill_gamma_888(crtc, ofdrm_set_gamma_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -682,42 +682,21 @@ static void ofdrm_device_set_gamma_linear(struct ofdrm_device *odev,
}
}
-static void ofdrm_device_set_gamma(struct ofdrm_device *odev,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+static void ofdrm_device_load_gamma(struct ofdrm_device *odev,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
struct drm_device *dev = &odev->sysfb.dev;
- int i;
+ struct drm_crtc *crtc = &odev->crtc;
switch (format->format) {
case DRM_FORMAT_RGB565:
case DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN:
- /* Use better interpolation, to take 32 values from lut[0] to lut[255] */
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE / 8; i++) {
- unsigned char r = lut[i * 8 + i / 4].red >> 8;
- unsigned char g = lut[i * 4 + i / 16].green >> 8;
- unsigned char b = lut[i * 8 + i / 4].blue >> 8;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
- /* Green has one more bit, so add padding with 0 for red and blue. */
- for (i = OFDRM_GAMMA_LUT_SIZE / 8; i < OFDRM_GAMMA_LUT_SIZE / 4; i++) {
- unsigned char r = 0;
- unsigned char g = lut[i * 4 + i / 16].green >> 8;
- unsigned char b = 0;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
+ drm_crtc_load_gamma_565_from_888(crtc, lut, ofdrm_set_gamma_lut);
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- for (i = 0; i < OFDRM_GAMMA_LUT_SIZE; i++) {
- unsigned char r = lut[i].red >> 8;
- unsigned char g = lut[i].green >> 8;
- unsigned char b = lut[i].blue >> 8;
-
- odev->funcs->cmap_write(odev, i, r, g, b);
- }
+ drm_crtc_load_gamma_888(crtc, lut, ofdrm_set_gamma_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -753,9 +732,9 @@ static void ofdrm_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_ato
const struct drm_format_info *format = sysfb_crtc_state->format;
if (crtc_state->gamma_lut)
- ofdrm_device_set_gamma(odev, format, crtc_state->gamma_lut->data);
+ ofdrm_device_load_gamma(odev, format, crtc_state->gamma_lut->data);
else
- ofdrm_device_set_gamma_linear(odev, format);
+ ofdrm_device_fill_gamma(odev, format);
}
}
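The converted atomic_flush above relies on the drm_crtc_fill_gamma_*() and drm_crtc_load_gamma_*() helpers from drm_color_mgmt together with a per-entry driver callback. Their implementation is not part of this diff; the sketch below is only an approximation of the 8-bit-per-channel case, inferred from the open-coded loops they replace, and the example_* names and the callback typedef are assumptions.

/*
 * Not the real implementation -- a rough sketch of what the 888 fill/load
 * helpers are expected to do, based on the removed ofdrm/vesadrm loops.
 */
typedef void (*example_set_lut_func)(struct drm_crtc *crtc, unsigned int index,
                                     u16 red, u16 green, u16 blue);

static void example_fill_gamma_888(struct drm_crtc *crtc, example_set_lut_func set)
{
        unsigned int i;

        /* Identity ramp: expand each 8-bit index to a 16-bit gray value. */
        for (i = 0; i < 256; i++) {
                u16 v = (i << 8) | i;

                set(crtc, i, v, v, v);
        }
}

static void example_load_gamma_888(struct drm_crtc *crtc, struct drm_color_lut *lut,
                                   example_set_lut_func set)
{
        unsigned int i;

        /* Forward the 256-entry GAMMA_LUT property values one by one. */
        for (i = 0; i < 256; i++)
                set(crtc, i, lut[i].red, lut[i].green, lut[i].blue);
}

The drivers then only provide the per-entry write callback (ofdrm_set_gamma_lut(), vesadrm_set_gamma_lut()), which down-shifts the 16-bit components to whatever the hardware colormap expects.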
@@ -1035,8 +1014,8 @@ static struct ofdrm_device *ofdrm_device_create(struct drm_driver *drv,
/* Primary plane */
- nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- odev->formats, ARRAY_SIZE(odev->formats));
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ odev->formats, ARRAY_SIZE(odev->formats));
primary_plane = &odev->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &ofdrm_primary_plane_funcs,
diff --git a/drivers/gpu/drm/sysfb/simpledrm.c b/drivers/gpu/drm/sysfb/simpledrm.c
index a1c3119330de..8530a3ef8a7a 100644
--- a/drivers/gpu/drm/sysfb/simpledrm.c
+++ b/drivers/gpu/drm/sysfb/simpledrm.c
@@ -18,7 +18,6 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_shmem.h>
-#include <drm/drm_format_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -765,8 +764,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
/* Primary plane */
- nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- sdev->formats, ARRAY_SIZE(sdev->formats));
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ sdev->formats, ARRAY_SIZE(sdev->formats));
primary_plane = &sdev->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &simpledrm_primary_plane_funcs,
diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c
index 4d62c78e7d1e..2b107958942c 100644
--- a/drivers/gpu/drm/sysfb/vesadrm.c
+++ b/drivers/gpu/drm/sysfb/vesadrm.c
@@ -9,6 +9,7 @@
#include <drm/clients/drm_client_setup.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
@@ -87,15 +88,10 @@ static struct vesadrm_device *to_vesadrm_device(struct drm_device *dev)
static void vesadrm_vga_cmap_write(struct vesadrm_device *vesa, unsigned int index,
u16 red, u16 green, u16 blue)
{
- u8 i8, r8, g8, b8;
-
- if (index > 255)
- return;
-
- i8 = index;
- r8 = red >> 8;
- g8 = green >> 8;
- b8 = blue >> 8;
+ u8 i8 = index;
+ u8 r8 = red >> 8;
+ u8 g8 = green >> 8;
+ u8 b8 = blue >> 8;
outb_p(i8, VGA_PEL_IW);
outb_p(r8, VGA_PEL_D);
@@ -120,9 +116,6 @@ static void vesadrm_pmi_cmap_write(struct vesadrm_device *vesa, unsigned int ind
0x00,
};
- if (index > 255)
- return;
-
__asm__ __volatile__ (
"call *(%%esi)"
: /* no return value */
@@ -135,43 +128,36 @@ static void vesadrm_pmi_cmap_write(struct vesadrm_device *vesa, unsigned int ind
}
#endif
-static void vesadrm_set_gamma_linear(struct vesadrm_device *vesa,
- const struct drm_format_info *format)
+static void vesadrm_set_gamma_lut(struct drm_crtc *crtc, unsigned int index,
+ u16 red, u16 green, u16 blue)
+{
+ struct drm_device *dev = crtc->dev;
+ struct vesadrm_device *vesa = to_vesadrm_device(dev);
+ u8 i8 = index & 0xff;
+
+ if (drm_WARN_ON_ONCE(dev, index != i8))
+ return; /* driver bug */
+
+ vesa->cmap_write(vesa, i8, red, green, blue);
+}
+
+static void vesadrm_fill_gamma_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format)
{
struct drm_device *dev = &vesa->sysfb.dev;
- size_t i;
- u16 r16, g16, b16;
+ struct drm_crtc *crtc = &vesa->crtc;
switch (format->format) {
case DRM_FORMAT_XRGB1555:
- for (i = 0; i < 32; ++i) {
- r16 = i * 8 + i / 4;
- r16 |= (r16 << 8) | r16;
- vesa->cmap_write(vesa, i, r16, r16, r16);
- }
+ drm_crtc_fill_gamma_555(crtc, vesadrm_set_gamma_lut);
break;
case DRM_FORMAT_RGB565:
- for (i = 0; i < 32; ++i) {
- r16 = i * 8 + i / 4;
- r16 |= (r16 << 8) | r16;
- g16 = i * 4 + i / 16;
- g16 |= (g16 << 8) | g16;
- b16 = r16;
- vesa->cmap_write(vesa, i, r16, g16, b16);
- }
- for (i = 32; i < 64; ++i) {
- g16 = i * 4 + i / 16;
- g16 |= (g16 << 8) | g16;
- vesa->cmap_write(vesa, i, 0, g16, 0);
- }
+ drm_crtc_fill_gamma_565(crtc, vesadrm_set_gamma_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- for (i = 0; i < 256; ++i) {
- r16 = (i << 8) | i;
- vesa->cmap_write(vesa, i, r16, r16, r16);
- }
+ drm_crtc_fill_gamma_888(crtc, vesadrm_set_gamma_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -180,38 +166,24 @@ static void vesadrm_set_gamma_linear(struct vesadrm_device *vesa,
}
}
-static void vesadrm_set_gamma_lut(struct vesadrm_device *vesa,
- const struct drm_format_info *format,
- struct drm_color_lut *lut)
+static void vesadrm_load_gamma_lut(struct vesadrm_device *vesa,
+ const struct drm_format_info *format,
+ struct drm_color_lut *lut)
{
struct drm_device *dev = &vesa->sysfb.dev;
- size_t i;
- u16 r16, g16, b16;
+ struct drm_crtc *crtc = &vesa->crtc;
switch (format->format) {
case DRM_FORMAT_XRGB1555:
- for (i = 0; i < 32; ++i) {
- r16 = lut[i * 8 + i / 4].red;
- g16 = lut[i * 8 + i / 4].green;
- b16 = lut[i * 8 + i / 4].blue;
- vesa->cmap_write(vesa, i, r16, g16, b16);
- }
+ drm_crtc_load_gamma_555_from_888(crtc, lut, vesadrm_set_gamma_lut);
break;
case DRM_FORMAT_RGB565:
- for (i = 0; i < 32; ++i) {
- r16 = lut[i * 8 + i / 4].red;
- g16 = lut[i * 4 + i / 16].green;
- b16 = lut[i * 8 + i / 4].blue;
- vesa->cmap_write(vesa, i, r16, g16, b16);
- }
- for (i = 32; i < 64; ++i)
- vesa->cmap_write(vesa, i, 0, lut[i * 4 + i / 16].green, 0);
+ drm_crtc_load_gamma_565_from_888(crtc, lut, vesadrm_set_gamma_lut);
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_BGRX8888:
- for (i = 0; i < 256; ++i)
- vesa->cmap_write(vesa, i, lut[i].red, lut[i].green, lut[i].blue);
+ drm_crtc_load_gamma_888(crtc, lut, vesadrm_set_gamma_lut);
break;
default:
drm_warn_once(dev, "Unsupported format %p4cc for gamma correction\n",
@@ -253,13 +225,13 @@ static void vesadrm_crtc_helper_atomic_flush(struct drm_crtc *crtc,
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
if (sysfb_crtc_state->format == sysfb->fb_format) {
if (crtc_state->gamma_lut)
- vesadrm_set_gamma_lut(vesa,
- sysfb_crtc_state->format,
- crtc_state->gamma_lut->data);
+ vesadrm_load_gamma_lut(vesa,
+ sysfb_crtc_state->format,
+ crtc_state->gamma_lut->data);
else
- vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format);
+ vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
} else {
- vesadrm_set_gamma_linear(vesa, sysfb_crtc_state->format);
+ vesadrm_fill_gamma_lut(vesa, sysfb_crtc_state->format);
}
}
}
@@ -372,7 +344,7 @@ static struct vesadrm_device *vesadrm_device_create(struct drm_driver *drv,
#endif
}
-#ifdef CONFIG_X86
+#if defined(CONFIG_FIRMWARE_EDID)
if (drm_edid_header_is_valid(edid_info.dummy) == 8)
sysfb->edid = edid_info.dummy;
#endif
@@ -430,8 +402,8 @@ static struct vesadrm_device *vesadrm_device_create(struct drm_driver *drv,
/* Primary plane */
- nformats = drm_fb_build_fourcc_list(dev, &format->format, 1,
- vesa->formats, ARRAY_SIZE(vesa->formats));
+ nformats = drm_sysfb_build_fourcc_list(dev, &format->format, 1,
+ vesa->formats, ARRAY_SIZE(vesa->formats));
primary_plane = &vesa->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0, &vesadrm_primary_plane_funcs,
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
index 3afd6587df08..c0e952293ad0 100644
--- a/drivers/gpu/drm/tests/Makefile
+++ b/drivers/gpu/drm/tests/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_modes_test.o \
drm_plane_helper_test.o \
drm_probe_helper_test.o \
- drm_rect_test.o
+ drm_rect_test.o \
+ drm_sysfb_modeset_test.o
CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/gpu/drm/tests/drm_bridge_test.c b/drivers/gpu/drm/tests/drm_bridge_test.c
index ff88ec2e911c..887020141c7f 100644
--- a/drivers/gpu/drm/tests/drm_bridge_test.c
+++ b/drivers/gpu/drm/tests/drm_bridge_test.c
@@ -8,36 +8,64 @@
#include <drm/drm_bridge_helper.h>
#include <drm/drm_kunit_helpers.h>
+#include <kunit/device.h>
#include <kunit/test.h>
+/*
+ * Mimic the typical "private" struct defined by a bridge driver, which
+ * embeds a bridge plus other fields.
+ *
+ * Having at least one member before @bridge ensures we test a non-zero
+ * @bridge offset.
+ */
+struct drm_bridge_priv {
+ unsigned int enable_count;
+ unsigned int disable_count;
+ struct drm_bridge bridge;
+ void *data;
+};
+
struct drm_bridge_init_priv {
struct drm_device drm;
+ /** @dev: device, only for tests not needing a whole drm_device */
+ struct device *dev;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_encoder encoder;
- struct drm_bridge bridge;
+ struct drm_bridge_priv *test_bridge;
struct drm_connector *connector;
- unsigned int enable_count;
- unsigned int disable_count;
+ bool destroyed;
};
+static struct drm_bridge_priv *bridge_to_priv(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct drm_bridge_priv, bridge);
+}
+
+static void drm_test_bridge_priv_destroy(struct drm_bridge *bridge)
+{
+ struct drm_bridge_priv *bridge_priv = bridge_to_priv(bridge);
+ struct drm_bridge_init_priv *priv = (struct drm_bridge_init_priv *)bridge_priv->data;
+
+ priv->destroyed = true;
+}
+
static void drm_test_bridge_enable(struct drm_bridge *bridge)
{
- struct drm_bridge_init_priv *priv =
- container_of(bridge, struct drm_bridge_init_priv, bridge);
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
priv->enable_count++;
}
static void drm_test_bridge_disable(struct drm_bridge *bridge)
{
- struct drm_bridge_init_priv *priv =
- container_of(bridge, struct drm_bridge_init_priv, bridge);
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
priv->disable_count++;
}
static const struct drm_bridge_funcs drm_test_bridge_legacy_funcs = {
+ .destroy = drm_test_bridge_priv_destroy,
.enable = drm_test_bridge_enable,
.disable = drm_test_bridge_disable,
};
@@ -45,8 +73,7 @@ static const struct drm_bridge_funcs drm_test_bridge_legacy_funcs = {
static void drm_test_bridge_atomic_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
- struct drm_bridge_init_priv *priv =
- container_of(bridge, struct drm_bridge_init_priv, bridge);
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
priv->enable_count++;
}
@@ -54,13 +81,13 @@ static void drm_test_bridge_atomic_enable(struct drm_bridge *bridge,
static void drm_test_bridge_atomic_disable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
- struct drm_bridge_init_priv *priv =
- container_of(bridge, struct drm_bridge_init_priv, bridge);
+ struct drm_bridge_priv *priv = bridge_to_priv(bridge);
priv->disable_count++;
}
static const struct drm_bridge_funcs drm_test_bridge_atomic_funcs = {
+ .destroy = drm_test_bridge_priv_destroy,
.atomic_enable = drm_test_bridge_atomic_enable,
.atomic_disable = drm_test_bridge_atomic_disable,
.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
@@ -102,6 +129,12 @@ drm_test_bridge_init(struct kunit *test, const struct drm_bridge_funcs *funcs)
if (IS_ERR(priv))
return ERR_CAST(priv);
+ priv->test_bridge = devm_drm_bridge_alloc(dev, struct drm_bridge_priv, bridge, funcs);
+ if (IS_ERR(priv->test_bridge))
+ return ERR_CAST(priv->test_bridge);
+
+ priv->test_bridge->data = priv;
+
drm = &priv->drm;
priv->plane = drm_kunit_helper_create_primary_plane(test, drm,
NULL,
@@ -125,9 +158,8 @@ drm_test_bridge_init(struct kunit *test, const struct drm_bridge_funcs *funcs)
enc->possible_crtcs = drm_crtc_mask(priv->crtc);
- bridge = &priv->bridge;
+ bridge = &priv->test_bridge->bridge;
bridge->type = DRM_MODE_CONNECTOR_VIRTUAL;
- bridge->funcs = funcs;
ret = drm_kunit_bridge_add(test, bridge);
if (ret)
@@ -173,7 +205,7 @@ static void drm_test_drm_bridge_get_current_state_atomic(struct kunit *test)
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
retry_commit:
- bridge = &priv->bridge;
+ bridge = &priv->test_bridge->bridge;
bridge_state = drm_atomic_get_bridge_state(state, bridge);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bridge_state);
@@ -228,7 +260,7 @@ static void drm_test_drm_bridge_get_current_state_legacy(struct kunit *test)
* locking. The function would return NULL in all cases anyway,
* so we don't really have any concurrency to worry about.
*/
- bridge = &priv->bridge;
+ bridge = &priv->test_bridge->bridge;
KUNIT_EXPECT_NULL(test, drm_bridge_get_current_state(bridge));
}
@@ -253,7 +285,7 @@ static void drm_test_drm_bridge_helper_reset_crtc_atomic(struct kunit *test)
struct drm_modeset_acquire_ctx ctx;
struct drm_bridge_init_priv *priv;
struct drm_display_mode *mode;
- struct drm_bridge *bridge;
+ struct drm_bridge_priv *bridge_priv;
int ret;
priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
@@ -279,14 +311,14 @@ retry_commit:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- bridge = &priv->bridge;
- KUNIT_ASSERT_EQ(test, priv->enable_count, 1);
- KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+ bridge_priv = priv->test_bridge;
+ KUNIT_ASSERT_EQ(test, bridge_priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, bridge_priv->disable_count, 0);
drm_modeset_acquire_init(&ctx, 0);
retry_reset:
- ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ ret = drm_bridge_helper_reset_crtc(&bridge_priv->bridge, &ctx);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry_reset;
@@ -296,8 +328,8 @@ retry_reset:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- KUNIT_EXPECT_EQ(test, priv->enable_count, 2);
- KUNIT_EXPECT_EQ(test, priv->disable_count, 1);
+ KUNIT_EXPECT_EQ(test, bridge_priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, bridge_priv->disable_count, 1);
}
/*
@@ -309,7 +341,7 @@ static void drm_test_drm_bridge_helper_reset_crtc_atomic_disabled(struct kunit *
struct drm_modeset_acquire_ctx ctx;
struct drm_bridge_init_priv *priv;
struct drm_display_mode *mode;
- struct drm_bridge *bridge;
+ struct drm_bridge_priv *bridge_priv;
int ret;
priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs);
@@ -318,14 +350,14 @@ static void drm_test_drm_bridge_helper_reset_crtc_atomic_disabled(struct kunit *
mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode);
- bridge = &priv->bridge;
- KUNIT_ASSERT_EQ(test, priv->enable_count, 0);
- KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+ bridge_priv = priv->test_bridge;
+ KUNIT_ASSERT_EQ(test, bridge_priv->enable_count, 0);
+ KUNIT_ASSERT_EQ(test, bridge_priv->disable_count, 0);
drm_modeset_acquire_init(&ctx, 0);
retry_reset:
- ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ ret = drm_bridge_helper_reset_crtc(&bridge_priv->bridge, &ctx);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry_reset;
@@ -335,8 +367,8 @@ retry_reset:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- KUNIT_EXPECT_EQ(test, priv->enable_count, 0);
- KUNIT_EXPECT_EQ(test, priv->disable_count, 0);
+ KUNIT_EXPECT_EQ(test, bridge_priv->enable_count, 0);
+ KUNIT_EXPECT_EQ(test, bridge_priv->disable_count, 0);
}
/*
@@ -348,7 +380,7 @@ static void drm_test_drm_bridge_helper_reset_crtc_legacy(struct kunit *test)
struct drm_modeset_acquire_ctx ctx;
struct drm_bridge_init_priv *priv;
struct drm_display_mode *mode;
- struct drm_bridge *bridge;
+ struct drm_bridge_priv *bridge_priv;
int ret;
priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs);
@@ -374,14 +406,14 @@ retry_commit:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- bridge = &priv->bridge;
- KUNIT_ASSERT_EQ(test, priv->enable_count, 1);
- KUNIT_ASSERT_EQ(test, priv->disable_count, 0);
+ bridge_priv = priv->test_bridge;
+ KUNIT_ASSERT_EQ(test, bridge_priv->enable_count, 1);
+ KUNIT_ASSERT_EQ(test, bridge_priv->disable_count, 0);
drm_modeset_acquire_init(&ctx, 0);
retry_reset:
- ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
+ ret = drm_bridge_helper_reset_crtc(&bridge_priv->bridge, &ctx);
if (ret == -EDEADLK) {
drm_modeset_backoff(&ctx);
goto retry_reset;
@@ -391,8 +423,8 @@ retry_reset:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- KUNIT_EXPECT_EQ(test, priv->enable_count, 2);
- KUNIT_EXPECT_EQ(test, priv->disable_count, 1);
+ KUNIT_EXPECT_EQ(test, bridge_priv->enable_count, 2);
+ KUNIT_EXPECT_EQ(test, bridge_priv->disable_count, 1);
}
static struct kunit_case drm_bridge_helper_reset_crtc_tests[] = {
@@ -407,11 +439,83 @@ static struct kunit_suite drm_bridge_helper_reset_crtc_test_suite = {
.test_cases = drm_bridge_helper_reset_crtc_tests,
};
+static int drm_test_bridge_alloc_init(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv;
+
+ priv = kunit_kzalloc(test, sizeof(*priv), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
+
+ priv->dev = kunit_device_register(test, "drm-bridge-dev");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->dev);
+
+ test->priv = priv;
+
+ priv->test_bridge = devm_drm_bridge_alloc(priv->dev, struct drm_bridge_priv, bridge,
+ &drm_test_bridge_atomic_funcs);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv->test_bridge);
+
+ priv->test_bridge->data = priv;
+
+ KUNIT_ASSERT_FALSE(test, priv->destroyed);
+
+ return 0;
+}
+
+/*
+ * Test that a bridge is freed when the device is destroyed in the
+ * absence of any other drm_bridge_get()/drm_bridge_put() operations.
+ */
+static void drm_test_drm_bridge_alloc_basic(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv = test->priv;
+
+ KUNIT_ASSERT_FALSE(test, priv->destroyed);
+
+ kunit_device_unregister(test, priv->dev);
+ KUNIT_EXPECT_TRUE(test, priv->destroyed);
+}
+
+/*
+ * Test that a bridge is not freed when the device is destroyed while there
+ * is still a reference to it, and that it is freed once that reference is put.
+ */
+static void drm_test_drm_bridge_alloc_get_put(struct kunit *test)
+{
+ struct drm_bridge_init_priv *priv = test->priv;
+
+ KUNIT_ASSERT_FALSE(test, priv->destroyed);
+
+ drm_bridge_get(&priv->test_bridge->bridge);
+ KUNIT_EXPECT_FALSE(test, priv->destroyed);
+
+ kunit_device_unregister(test, priv->dev);
+ KUNIT_EXPECT_FALSE(test, priv->destroyed);
+
+ drm_bridge_put(&priv->test_bridge->bridge);
+ KUNIT_EXPECT_TRUE(test, priv->destroyed);
+}
+
+static struct kunit_case drm_bridge_alloc_tests[] = {
+ KUNIT_CASE(drm_test_drm_bridge_alloc_basic),
+ KUNIT_CASE(drm_test_drm_bridge_alloc_get_put),
+ { }
+};
+
+static struct kunit_suite drm_bridge_alloc_test_suite = {
+ .name = "drm_bridge_alloc",
+ .init = drm_test_bridge_alloc_init,
+ .test_cases = drm_bridge_alloc_tests,
+};
+
kunit_test_suites(
&drm_bridge_get_current_state_test_suite,
&drm_bridge_helper_reset_crtc_test_suite,
+ &drm_bridge_alloc_test_suite,
);
MODULE_AUTHOR("Maxime Ripard <mripard@kernel.org>");
+MODULE_AUTHOR("Luca Ceresoli <luca.ceresoli@bootlin.com>");
+
MODULE_DESCRIPTION("Kunit test for drm_bridge functions");
MODULE_LICENSE("GPL");
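The new drm_bridge_alloc suite exercises the reference-counted bridge lifetime provided by devm_drm_bridge_alloc(). A minimal sketch of the driver-side pattern the tests mimic is shown below; the example_* names, the probe function and the platform_device usage are illustrative assumptions, and example_bridge_funcs (with a .destroy callback) is assumed to be defined elsewhere.

/*
 * Illustrative only: embed the refcounted bridge in a driver-private
 * struct, as struct drm_bridge_priv does in the test above.
 */
struct example_bridge {
        struct device *dev;
        struct drm_bridge bridge;       /* non-zero offset, as in the test */
};

static int example_bridge_probe(struct platform_device *pdev)
{
        struct example_bridge *ebr;

        /* Allocates the container and initialises the embedded, refcounted bridge. */
        ebr = devm_drm_bridge_alloc(&pdev->dev, struct example_bridge, bridge,
                                    &example_bridge_funcs);
        if (IS_ERR(ebr))
                return PTR_ERR(ebr);

        ebr->dev = &pdev->dev;
        drm_bridge_add(&ebr->bridge);

        /*
         * The bridge stays alive until the last drm_bridge_put(); the
         * funcs->destroy callback runs at that point, which is what
         * drm_test_drm_bridge_alloc_get_put() verifies.
         */
        return 0;
}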
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index 35cd3405d045..ad06762db671 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -279,9 +279,9 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.dst_pitch = TEST_USE_DEFAULT_PITCH,
.expected = {
0xFF, 0x00,
- 0x4C, 0x99,
- 0x19, 0x66,
- 0xE5, 0xB2,
+ 0x4C, 0x95,
+ 0x1C, 0x69,
+ 0xE2, 0xB2,
},
},
.rgb332_result = {
@@ -430,9 +430,9 @@ static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
.gray8_result = {
.dst_pitch = 5,
.expected = {
- 0x3C, 0x33, 0xC4, 0x00, 0x00,
- 0xBB, 0x3C, 0x33, 0x00, 0x00,
- 0x34, 0xBB, 0x3C, 0x00, 0x00,
+ 0x3D, 0x32, 0xC1, 0x00, 0x00,
+ 0xBA, 0x3D, 0x32, 0x00, 0x00,
+ 0x34, 0xBA, 0x3D, 0x00, 0x00,
},
},
.rgb332_result = {
@@ -748,14 +748,9 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
buf = dst.vaddr;
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB565, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, &params->clip,
+ &fmtcnv_state, false);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -795,14 +790,8 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB1555, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_xrgb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -842,14 +831,8 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB1555, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_argb1555(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -889,14 +872,8 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGBA5551, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_rgba5551(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -939,12 +916,7 @@ static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_RGB888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
- KUNIT_EXPECT_FALSE(test, blit_result);
+ drm_fb_xrgb8888_to_rgb888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -985,12 +957,8 @@ static void drm_test_fb_xrgb8888_to_bgr888(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, &result->dst_pitch, DRM_FORMAT_BGR888, &src, &fb, &params->clip,
+ drm_fb_xrgb8888_to_bgr888(&dst, &result->dst_pitch, &src, &fb, &params->clip,
&fmtcnv_state);
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1030,14 +998,8 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB8888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_argb8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1077,12 +1039,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB2101010, &src, &fb,
- &params->clip, &fmtcnv_state);
-
- KUNIT_EXPECT_FALSE(test, blit_result);
+ drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1122,14 +1079,8 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ARGB2101010, &src, &fb,
- &params->clip, &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_argb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1202,23 +1153,15 @@ static void drm_test_fb_swab(struct kunit *test)
buf = dst.vaddr; /* restore original value of buf */
memset(buf, 0, dst_size);
- int blit_result;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888 | DRM_FORMAT_BIG_ENDIAN,
- &src, &fb, &params->clip, &fmtcnv_state);
+ drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr;
memset(buf, 0, dst_size);
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_BGRX8888, &src, &fb, &params->clip,
- &fmtcnv_state);
+ drm_fb_xrgb8888_to_bgrx8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
buf = dst.vaddr;
@@ -1229,11 +1172,8 @@ static void drm_test_fb_swab(struct kunit *test)
mock_format.format |= DRM_FORMAT_BIG_ENDIAN;
fb.format = &mock_format;
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XRGB8888, &src, &fb, &params->clip,
- &fmtcnv_state);
+ drm_fb_swab(&dst, dst_pitch, &src, &fb, &params->clip, false, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1266,14 +1206,8 @@ static void drm_test_fb_xrgb8888_to_abgr8888(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_ABGR8888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_abgr8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1306,14 +1240,8 @@ static void drm_test_fb_xrgb8888_to_xbgr8888(struct kunit *test)
const unsigned int *dst_pitch = (result->dst_pitch == TEST_USE_DEFAULT_PITCH) ?
NULL : &result->dst_pitch;
- int blit_result = 0;
-
- blit_result = drm_fb_blit(&dst, dst_pitch, DRM_FORMAT_XBGR8888, &src, &fb, &params->clip,
- &fmtcnv_state);
-
+ drm_fb_xrgb8888_to_xbgr8888(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
-
- KUNIT_EXPECT_FALSE(test, blit_result);
KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
@@ -1407,147 +1335,6 @@ static void drm_test_fb_clip_offset(struct kunit *test)
KUNIT_EXPECT_EQ(test, offset, params->expected_offset);
}
-struct fb_build_fourcc_list_case {
- const char *name;
- u32 native_fourccs[TEST_BUF_SIZE];
- size_t native_fourccs_size;
- u32 expected[TEST_BUF_SIZE];
- size_t expected_fourccs_size;
-};
-
-static struct fb_build_fourcc_list_case fb_build_fourcc_list_cases[] = {
- {
- .name = "no native formats",
- .native_fourccs = { },
- .native_fourccs_size = 0,
- .expected = { DRM_FORMAT_XRGB8888 },
- .expected_fourccs_size = 1,
- },
- {
- .name = "XRGB8888 as native format",
- .native_fourccs = { DRM_FORMAT_XRGB8888 },
- .native_fourccs_size = 1,
- .expected = { DRM_FORMAT_XRGB8888 },
- .expected_fourccs_size = 1,
- },
- {
- .name = "remove duplicates",
- .native_fourccs = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_XRGB8888,
- },
- .native_fourccs_size = 11,
- .expected = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
- },
- .expected_fourccs_size = 3,
- },
- {
- .name = "convert alpha formats",
- .native_fourccs = {
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ABGR1555,
- DRM_FORMAT_RGBA5551,
- DRM_FORMAT_BGRA5551,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_RGBA8888,
- DRM_FORMAT_BGRA8888,
- DRM_FORMAT_ARGB2101010,
- DRM_FORMAT_ABGR2101010,
- DRM_FORMAT_RGBA1010102,
- DRM_FORMAT_BGRA1010102,
- },
- .native_fourccs_size = 12,
- .expected = {
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_XBGR1555,
- DRM_FORMAT_RGBX5551,
- DRM_FORMAT_BGRX5551,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_RGBX8888,
- DRM_FORMAT_BGRX8888,
- DRM_FORMAT_XRGB2101010,
- DRM_FORMAT_XBGR2101010,
- DRM_FORMAT_RGBX1010102,
- DRM_FORMAT_BGRX1010102,
- },
- .expected_fourccs_size = 12,
- },
- {
- .name = "random formats",
- .native_fourccs = {
- DRM_FORMAT_Y212,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ABGR16161616F,
- DRM_FORMAT_C8,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_RGBA5551,
- DRM_FORMAT_BGR565_A8,
- DRM_FORMAT_R10,
- DRM_FORMAT_XYUV8888,
- },
- .native_fourccs_size = 10,
- .expected = {
- DRM_FORMAT_Y212,
- DRM_FORMAT_XRGB1555,
- DRM_FORMAT_ABGR16161616F,
- DRM_FORMAT_C8,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_RGBX5551,
- DRM_FORMAT_BGR565_A8,
- DRM_FORMAT_R10,
- DRM_FORMAT_XYUV8888,
- DRM_FORMAT_XRGB8888,
- },
- .expected_fourccs_size = 10,
- },
-};
-
-static void fb_build_fourcc_list_case_desc(struct fb_build_fourcc_list_case *t, char *desc)
-{
- strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
-}
-
-KUNIT_ARRAY_PARAM(fb_build_fourcc_list, fb_build_fourcc_list_cases, fb_build_fourcc_list_case_desc);
-
-static void drm_test_fb_build_fourcc_list(struct kunit *test)
-{
- const struct fb_build_fourcc_list_case *params = test->param_value;
- u32 fourccs_out[TEST_BUF_SIZE] = {0};
- size_t nfourccs_out;
- struct drm_device *drm;
- struct device *dev;
-
- dev = drm_kunit_helper_alloc_device(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
-
- drm = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm), 0, DRIVER_MODESET);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm);
-
- nfourccs_out = drm_fb_build_fourcc_list(drm, params->native_fourccs,
- params->native_fourccs_size,
- fourccs_out, TEST_BUF_SIZE);
-
- KUNIT_EXPECT_EQ(test, nfourccs_out, params->expected_fourccs_size);
- KUNIT_EXPECT_MEMEQ(test, fourccs_out, params->expected, TEST_BUF_SIZE);
-}
-
struct fb_memcpy_case {
const char *name;
u32 format;
@@ -1910,12 +1697,8 @@ static void drm_test_fb_memcpy(struct kunit *test)
memset(buf[i], 0, dst_size[i]);
}
- int blit_result;
-
- blit_result = drm_fb_blit(dst, dst_pitches, params->format, src, &fb, &params->clip,
- &fmtcnv_state);
+ drm_fb_memcpy(dst, dst_pitches, src, &fb, &params->clip);
- KUNIT_EXPECT_FALSE(test, blit_result);
for (size_t i = 0; i < fb.format->num_planes; i++) {
expected[i] = cpubuf_to_le32(test, params->expected[i], TEST_BUF_SIZE);
KUNIT_EXPECT_MEMEQ_MSG(test, buf[i], expected[i], dst_size[i],
@@ -1940,7 +1723,6 @@ static struct kunit_case drm_format_helper_test_cases[] = {
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xbgr8888, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_abgr8888, convert_xrgb8888_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_clip_offset, clip_offset_gen_params),
- KUNIT_CASE_PARAM(drm_test_fb_build_fourcc_list, fb_build_fourcc_list_gen_params),
KUNIT_CASE_PARAM(drm_test_fb_memcpy, fb_memcpy_gen_params),
{}
};
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index 7ffd666753b1..8bd412735000 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -33,7 +33,7 @@ struct drm_atomic_helper_connector_hdmi_priv {
struct drm_encoder encoder;
struct drm_connector connector;
- const char *current_edid;
+ const void *current_edid;
size_t current_edid_len;
};
@@ -56,7 +56,7 @@ static struct drm_display_mode *find_preferred_mode(struct drm_connector *connec
}
static int set_connector_edid(struct kunit *test, struct drm_connector *connector,
- const char *edid, size_t edid_len)
+ const void *edid, size_t edid_len)
{
struct drm_atomic_helper_connector_hdmi_priv *priv =
connector_to_priv(connector);
@@ -89,15 +89,15 @@ static const struct drm_connector_hdmi_funcs reject_connector_hdmi_funcs = {
};
static enum drm_mode_status
-reject_100MHz_connector_tmds_char_rate_valid(const struct drm_connector *connector,
+reject_100mhz_connector_tmds_char_rate_valid(const struct drm_connector *connector,
const struct drm_display_mode *mode,
unsigned long long tmds_rate)
{
return (tmds_rate > 100ULL * 1000 * 1000) ? MODE_BAD : MODE_OK;
}
-static const struct drm_connector_hdmi_funcs reject_100_MHz_connector_hdmi_funcs = {
- .tmds_char_rate_valid = reject_100MHz_connector_tmds_char_rate_valid,
+static const struct drm_connector_hdmi_funcs reject_100mhz_connector_hdmi_funcs = {
+ .tmds_char_rate_valid = reject_100mhz_connector_tmds_char_rate_valid,
};
static int dummy_connector_get_modes(struct drm_connector *connector)
@@ -140,10 +140,11 @@ static const struct drm_connector_funcs dummy_connector_funcs = {
static
struct drm_atomic_helper_connector_hdmi_priv *
-drm_kunit_helper_connector_hdmi_init_funcs(struct kunit *test,
- unsigned int formats,
- unsigned int max_bpc,
- const struct drm_connector_hdmi_funcs *hdmi_funcs)
+__connector_hdmi_init(struct kunit *test,
+ unsigned int formats,
+ unsigned int max_bpc,
+ const struct drm_connector_hdmi_funcs *hdmi_funcs,
+ const void *edid_data, size_t edid_len)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_connector *conn;
@@ -182,6 +183,8 @@ drm_kunit_helper_connector_hdmi_init_funcs(struct kunit *test,
enc->possible_crtcs = drm_crtc_mask(priv->crtc);
conn = &priv->connector;
+ conn->ycbcr_420_allowed = !!(formats & BIT(HDMI_COLORSPACE_YUV420));
+
ret = drmm_connector_hdmi_init(drm, conn,
"Vendor", "Product",
&dummy_connector_funcs,
@@ -197,29 +200,28 @@ drm_kunit_helper_connector_hdmi_init_funcs(struct kunit *test,
drm_mode_config_reset(drm);
+ if (edid_data && edid_len) {
+ ret = set_connector_edid(test, &priv->connector, edid_data, edid_len);
+ KUNIT_ASSERT_GT(test, ret, 0);
+ }
+
return priv;
}
+#define drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test, formats, max_bpc, funcs, edid) \
+ __connector_hdmi_init(test, formats, max_bpc, funcs, edid, ARRAY_SIZE(edid))
+
static
struct drm_atomic_helper_connector_hdmi_priv *
drm_kunit_helper_connector_hdmi_init(struct kunit *test,
unsigned int formats,
unsigned int max_bpc)
{
- struct drm_atomic_helper_connector_hdmi_priv *priv;
- int ret;
-
- priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
- formats, max_bpc,
- &dummy_connector_hdmi_funcs);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv);
-
- ret = set_connector_edid(test, &priv->connector,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
- return priv;
+ return drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ formats,
+ max_bpc,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
}
/*
@@ -414,7 +416,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range);
@@ -474,7 +476,7 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_EXPECT_FALSE(test, conn_state->hdmi.is_limited_range);
@@ -533,7 +535,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -595,7 +597,7 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -658,7 +660,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -720,7 +722,7 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
ret = drm_atomic_check_only(state);
KUNIT_ASSERT_EQ(test, ret, 0);
- conn_state = drm_atomic_get_connector_state(state, conn);
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
KUNIT_ASSERT_EQ(test,
@@ -734,6 +736,107 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
}
/*
+ * Test that, for an HDMI connector driving an HDMI monitor, we get a
+ * limited RGB quantization range with a YUV420 mode, regardless of the
+ * value of the Broadcast RGB property.
+ */
+static void drm_test_check_broadcast_rgb_cea_mode_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ enum drm_hdmi_broadcast_rgb broadcast_rgb;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_atomic_state *state;
+ struct drm_display_mode *mode;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ broadcast_rgb = *(enum drm_hdmi_broadcast_rgb *)test->param_value;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV420),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
+
+ mode = drm_kunit_display_mode_from_cea_vic(test, drm, 95);
+ KUNIT_ASSERT_NOT_NULL(test, mode);
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ mode, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+retry_conn_state:
+ conn_state = drm_atomic_get_connector_state(state, conn);
+ if (PTR_ERR(conn_state) == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_state;
+ }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ conn_state->hdmi.broadcast_rgb = broadcast_rgb;
+
+ ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_state;
+ }
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ conn_state = drm_atomic_get_new_connector_state(state, conn);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
+
+ KUNIT_ASSERT_EQ(test, conn_state->hdmi.broadcast_rgb, broadcast_rgb);
+ KUNIT_ASSERT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_YUV420);
+
+ KUNIT_EXPECT_TRUE(test, conn_state->hdmi.is_limited_range);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+static const enum drm_hdmi_broadcast_rgb check_broadcast_rgb_cea_mode_yuv420_tests[] = {
+ DRM_HDMI_BROADCAST_RGB_AUTO,
+ DRM_HDMI_BROADCAST_RGB_FULL,
+ DRM_HDMI_BROADCAST_RGB_LIMITED,
+};
+
+static void
+check_broadcast_rgb_cea_mode_yuv420_desc(const enum drm_hdmi_broadcast_rgb *broadcast_rgb,
+ char *desc)
+{
+ sprintf(desc, "%s", drm_hdmi_connector_get_broadcast_rgb_name(*broadcast_rgb));
+}
+
+KUNIT_ARRAY_PARAM(check_broadcast_rgb_cea_mode_yuv420,
+ check_broadcast_rgb_cea_mode_yuv420_tests,
+ check_broadcast_rgb_cea_mode_yuv420_desc);
+
+/*
* Test that if we change the maximum bpc property to a different value,
* we trigger a mode change on the connector's CRTC, which will in turn
* disable/enable the connector.
@@ -752,19 +855,16 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -831,19 +931,16 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -905,21 +1002,18 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_dvi_1080p);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_dvi_1080p,
- ARRAY_SIZE(test_edid_dvi_1080p));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_FALSE(test, info->is_hdmi);
@@ -959,19 +1053,16 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1011,19 +1102,16 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 10);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 10,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1063,19 +1151,16 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1168,7 +1253,7 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
* Then we will pick the latter, and the computed TMDS character rate
* will be equal to 1.25 times the mode pixel clock.
*/
-static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
+static void drm_test_check_max_tmds_rate_bpc_fallback_rgb(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_modeset_acquire_ctx ctx;
@@ -1181,19 +1266,16 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1229,6 +1311,80 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
/*
* Test that if:
+ * - We have an HDMI connector and a display supporting both RGB and YUV420
+ * - The chosen mode can be supported in YUV420 output format only
+ * - The chosen mode has a TMDS character rate higher than the display
+ * supports in YUV420/12bpc
+ * - The chosen mode has a TMDS character rate lower than the display
+ * supports in YUV420/10bpc.
+ *
+ * Then we will pick the latter, and the computed TMDS character rate
+ * will be equal to 1.25 * 0.5 times the mode pixel clock.
+ */
+static void drm_test_check_max_tmds_rate_bpc_fallback_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_display_info *info;
+ struct drm_display_mode *yuv420_only_mode;
+ unsigned long long rate;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV420),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ info = &conn->display_info;
+ KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
+ KUNIT_ASSERT_TRUE(test, conn->ycbcr_420_allowed);
+
+ yuv420_only_mode = drm_kunit_display_mode_from_cea_vic(test, drm, 95);
+ KUNIT_ASSERT_NOT_NULL(test, yuv420_only_mode);
+ KUNIT_ASSERT_TRUE(test, drm_mode_is_420_only(info, yuv420_only_mode));
+
+ rate = drm_hdmi_compute_mode_clock(yuv420_only_mode, 12, HDMI_COLORSPACE_YUV420);
+ KUNIT_ASSERT_GT(test, rate, info->max_tmds_clock * 1000);
+
+ rate = drm_hdmi_compute_mode_clock(yuv420_only_mode, 10, HDMI_COLORSPACE_YUV420);
+ KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
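+ /*
+ * Illustrative numbers, assuming VIC 95 (3840x2160@30, 297 MHz pixel
+ * clock) and the EDID's 200 MHz TMDS limit:
+ *   YUV420/12 bpc: 297000 * 0.5 * 1.5  = 222750 kHz > 200000 kHz
+ *   YUV420/10 bpc: 297000 * 0.5 * 1.25 = 185625 kHz < 200000 kHz
+ * so the 10 bpc fallback is expected to be picked.
+ */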
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ yuv420_only_mode, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+ KUNIT_ASSERT_NOT_NULL(test, conn_state);
+
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 10);
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_YUV420);
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.tmds_char_rate, yuv420_only_mode->clock * 625);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
+ * Test that if:
* - We have an HDMI connector supporting both RGB and YUV422 and up to
* 12 bpc
* - The chosen mode has a TMDS character rate higher than the display
@@ -1240,7 +1396,7 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
* Then we will prefer to keep the RGB format with a lower bpc over
* picking YUV422.
*/
-static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
+static void drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv422(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_modeset_acquire_ctx ctx;
@@ -1253,21 +1409,18 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1304,6 +1457,170 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
}
/*
+ * Test that if:
+ * - We have an HDMI connector supporting both RGB and YUV420 and up to
+ * 12 bpc
+ * - The chosen mode has a TMDS character rate higher than the display
+ * supports in RGB/10bpc but lower than the display supports in
+ * RGB/8bpc
+ * - The chosen mode has a TMDS character rate lower than the display
+ * supports in YUV420/12bpc.
+ *
+ * Then we will prefer to keep the RGB format with a lower bpc over
+ * picking YUV420.
+ */
+static void drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_display_info *info;
+ struct drm_display_mode *preferred;
+ unsigned long long rate;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV420),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_4k_rgb_yuv420_dc_max_340mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ info = &conn->display_info;
+ KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
+ KUNIT_ASSERT_TRUE(test, conn->ycbcr_420_allowed);
+
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+ KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
+ KUNIT_ASSERT_TRUE(test, drm_mode_is_420_also(info, preferred));
+
+ rate = drm_hdmi_compute_mode_clock(preferred, 8, HDMI_COLORSPACE_RGB);
+ KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
+
+ rate = drm_hdmi_compute_mode_clock(preferred, 10, HDMI_COLORSPACE_RGB);
+ KUNIT_ASSERT_GT(test, rate, info->max_tmds_clock * 1000);
+
+ rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV420);
+ KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
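+ /*
+ * Illustrative numbers, assuming the 4k preferred mode (297 MHz pixel
+ * clock) and the EDID's 340 MHz TMDS limit:
+ *   RGB/8 bpc:     297000 * 1.0        = 297000 kHz < 340000 kHz
+ *   RGB/10 bpc:    297000 * 1.25       = 371250 kHz > 340000 kHz
+ *   YUV420/12 bpc: 297000 * 0.5 * 1.5  = 222750 kHz < 340000 kHz
+ * so RGB at 8 bpc is expected to win over the YUV420 option.
+ */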
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ preferred, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+ KUNIT_ASSERT_NOT_NULL(test, conn_state);
+
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_bpc, 8);
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
+ * Test that if a driver supports only RGB, but the chosen mode can be
+ * supported by the screen only in YUV420 output format, every fallback
+ * attempt fails and the atomic check rejects the mode.
+ */
+static void drm_test_check_driver_unsupported_fallback_yuv420(struct kunit *test)
+{
+ struct drm_atomic_helper_connector_hdmi_priv *priv;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_atomic_state *state;
+ struct drm_display_info *info;
+ struct drm_display_mode *preferred, *yuv420_only_mode;
+ struct drm_connector *conn;
+ struct drm_device *drm;
+ struct drm_crtc *crtc;
+ int ret;
+
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz);
+ KUNIT_ASSERT_NOT_NULL(test, priv);
+
+ drm = &priv->drm;
+ crtc = priv->crtc;
+ conn = &priv->connector;
+ info = &conn->display_info;
+ KUNIT_ASSERT_TRUE(test, info->is_hdmi);
+ KUNIT_ASSERT_FALSE(test, conn->ycbcr_420_allowed);
+
+ preferred = find_preferred_mode(conn);
+ KUNIT_ASSERT_NOT_NULL(test, preferred);
+ KUNIT_ASSERT_FALSE(test, drm_mode_is_420_also(info, preferred));
+
+ yuv420_only_mode = drm_kunit_display_mode_from_cea_vic(test, drm, 95);
+ KUNIT_ASSERT_NOT_NULL(test, yuv420_only_mode);
+ KUNIT_ASSERT_TRUE(test, drm_mode_is_420_only(info, yuv420_only_mode));
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry_conn_enable:
+ ret = drm_kunit_helper_enable_crtc_connector(test, drm, crtc, conn,
+ preferred, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_conn_enable;
+ }
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
+ conn_state = conn->state;
+ KUNIT_ASSERT_NOT_NULL(test, conn_state);
+ KUNIT_EXPECT_EQ(test, conn_state->hdmi.output_format, HDMI_COLORSPACE_RGB);
+
+ state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+
+retry_crtc_state:
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (PTR_ERR(crtc_state) == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_crtc_state;
+ }
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state);
+
+ ret = drm_atomic_set_mode_for_crtc(crtc_state, yuv420_only_mode);
+ KUNIT_EXPECT_EQ(test, ret, 0);
+
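+ /*
+ * Setting the YUV420-only mode itself succeeds; the atomic check
+ * below is expected to fail because the driver only supports RGB.
+ */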
+ ret = drm_atomic_check_only(state);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret)
+ goto retry_crtc_state;
+ }
+ KUNIT_ASSERT_LT(test, ret, 0);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+}
+
+/*
* Test that if a driver and screen supports RGB and YUV formats, and we
* try to set the VIC 1 mode, we end up with 8bpc RGB even if we could
* have had a higher bpc.
@@ -1321,20 +1638,17 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1388,19 +1702,16 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1458,21 +1769,18 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1531,19 +1839,16 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1594,21 +1899,18 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
struct drm_crtc *crtc;
int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB) |
- BIT(HDMI_COLORSPACE_YUV422) |
- BIT(HDMI_COLORSPACE_YUV444),
- 12);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB) |
+ BIT(HDMI_COLORSPACE_YUV422) |
+ BIT(HDMI_COLORSPACE_YUV444),
+ 12,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_340mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
drm = &priv->drm;
crtc = priv->crtc;
conn = &priv->connector;
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_340mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_340mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
info = &conn->display_info;
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
@@ -1704,17 +2006,17 @@ static struct kunit_case drm_atomic_helper_connector_hdmi_check_tests[] = {
KUNIT_CASE(drm_test_check_broadcast_rgb_full_cea_mode_vic_1),
KUNIT_CASE(drm_test_check_broadcast_rgb_limited_cea_mode),
KUNIT_CASE(drm_test_check_broadcast_rgb_limited_cea_mode_vic_1),
- /*
- * TODO: When we'll have YUV output support, we need to check
- * that the limited range is always set to limited no matter
- * what the value of Broadcast RGB is.
- */
+ KUNIT_CASE_PARAM(drm_test_check_broadcast_rgb_cea_mode_yuv420,
+ check_broadcast_rgb_cea_mode_yuv420_gen_params),
KUNIT_CASE(drm_test_check_broadcast_rgb_crtc_mode_changed),
KUNIT_CASE(drm_test_check_broadcast_rgb_crtc_mode_not_changed),
KUNIT_CASE(drm_test_check_disable_connector),
KUNIT_CASE(drm_test_check_hdmi_funcs_reject_rate),
- KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback),
- KUNIT_CASE(drm_test_check_max_tmds_rate_format_fallback),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_rgb),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_yuv420),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv422),
+ KUNIT_CASE(drm_test_check_max_tmds_rate_bpc_fallback_ignore_yuv420),
+ KUNIT_CASE(drm_test_check_driver_unsupported_fallback_yuv420),
KUNIT_CASE(drm_test_check_output_bpc_crtc_mode_changed),
KUNIT_CASE(drm_test_check_output_bpc_crtc_mode_not_changed),
KUNIT_CASE(drm_test_check_output_bpc_dvi),
@@ -1927,28 +2229,20 @@ static void drm_test_check_mode_valid(struct kunit *test)
static void drm_test_check_mode_valid_reject_rate(struct kunit *test)
{
struct drm_atomic_helper_connector_hdmi_priv *priv;
- struct drm_connector *conn;
struct drm_display_mode *preferred;
- int ret;
- priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8,
- &reject_100_MHz_connector_hdmi_funcs);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &reject_100mhz_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_200mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
- conn = &priv->connector;
-
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_200mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
/*
* Unlike in drm_test_check_mode_valid(), here 1080p is rejected but
* 480p is allowed.
*/
- preferred = find_preferred_mode(conn);
+ preferred = find_preferred_mode(&priv->connector);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_EXPECT_EQ(test, preferred->hdisplay, 640);
KUNIT_EXPECT_EQ(test, preferred->vdisplay, 480);
@@ -1966,12 +2260,14 @@ static void drm_test_check_mode_valid_reject(struct kunit *test)
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_connector *conn;
struct drm_display_mode *preferred;
+ unsigned char no_edid[] = {};
int ret;
- priv = drm_kunit_helper_connector_hdmi_init_funcs(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8,
- &reject_connector_hdmi_funcs);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &reject_connector_hdmi_funcs,
+ no_edid);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
@@ -1996,20 +2292,15 @@ static void drm_test_check_mode_valid_reject_max_clock(struct kunit *test)
struct drm_atomic_helper_connector_hdmi_priv *priv;
struct drm_connector *conn;
struct drm_display_mode *preferred;
- int ret;
- priv = drm_kunit_helper_connector_hdmi_init(test,
- BIT(HDMI_COLORSPACE_RGB),
- 8);
+ priv = drm_kunit_helper_connector_hdmi_init_with_edid_funcs(test,
+ BIT(HDMI_COLORSPACE_RGB),
+ 8,
+ &dummy_connector_hdmi_funcs,
+ test_edid_hdmi_1080p_rgb_max_100mhz);
KUNIT_ASSERT_NOT_NULL(test, priv);
conn = &priv->connector;
-
- ret = set_connector_edid(test, conn,
- test_edid_hdmi_1080p_rgb_max_100mhz,
- ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_100mhz));
- KUNIT_ASSERT_GT(test, ret, 0);
-
KUNIT_ASSERT_EQ(test, conn->display_info.max_tmds_clock, 100 * 1000);
preferred = find_preferred_mode(conn);
diff --git a/drivers/gpu/drm/tests/drm_kunit_edid.h b/drivers/gpu/drm/tests/drm_kunit_edid.h
index 6358397a5d7a..02e2761b3b1f 100644
--- a/drivers/gpu/drm/tests/drm_kunit_edid.h
+++ b/drivers/gpu/drm/tests/drm_kunit_edid.h
@@ -583,4 +583,236 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz[] = {
0x00, 0x00, 0x00, 0x8c
};
+/*
+ * Max resolution:
+ * - 1920x1080@60Hz with RGB, YUV444, YUV422
+ * - 3840x2160@30Hz with YUV420 only
+ * Max BPC: 16 for all modes
+ * Max TMDS clock: 200 MHz
+ *
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 31 d8 34 00 00 00 00 00
+ * ff 23 01 03 80 60 36 78 0f ee 91 a3 54 4c 99 26
+ * 0f 50 54 20 00 00 01 01 01 01 01 01 01 01 01 01
+ * 01 01 01 01 01 01 02 3a 80 18 71 38 2d 40 58 2c
+ * 45 00 c0 1c 32 00 00 1e 00 00 00 fc 00 54 65 73
+ * 74 20 45 44 49 44 0a 20 20 20 00 00 00 fd 00 18
+ * 55 18 5e 11 00 0a 20 20 20 20 20 20 00 00 00 10
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 bb
+ *
+ * 02 03 29 31 42 90 5f 6c 03 0c 00 10 00 78 28 20
+ * 00 00 01 03 6d d8 5d c4 01 28 80 07 00 00 00 00
+ * 00 00 e3 0f 00 00 e2 0e 5f 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ca
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: LNX
+ * Model: 52
+ * Model year: 2025
+ * Basic Display Parameters & Features:
+ * Digital display
+ * Maximum image size: 96 cm x 54 cm
+ * Gamma: 2.20
+ * RGB color display
+ * Default (sRGB) color space is primary color space
+ * First detailed timing is the preferred timing
+ * Supports GTF timings within operating range
+ * Color Characteristics:
+ * Red : 0.6396, 0.3300
+ * Green: 0.2998, 0.5996
+ * Blue : 0.1503, 0.0595
+ * White: 0.3125, 0.3291
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * Standard Timings: none
+ * Detailed Timing Descriptors:
+ * DTD 1: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (960 mm x 540 mm)
+ * Hfront 88 Hsync 44 Hback 148 Hpol P
+ * Vfront 4 Vsync 5 Vback 36 Vpol P
+ * Display Product Name: 'Test EDID'
+ * Display Range Limits:
+ * Monitor ranges (GTF): 24-85 Hz V, 24-94 kHz H, max dotclock 170 MHz
+ * Dummy Descriptor:
+ * Extension blocks: 1
+ * Checksum: 0xbb
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Supports YCbCr 4:4:4
+ * Supports YCbCr 4:2:2
+ * Native detailed modes: 1
+ * Video Data Block:
+ * VIC 16: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (native)
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.0.0.0
+ * DC_48bit
+ * DC_36bit
+ * DC_30bit
+ * DC_Y444
+ * Maximum TMDS clock: 200 MHz
+ * Extended HDMI video details:
+ * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8:
+ * Version: 1
+ * Maximum TMDS Character Rate: 200 MHz
+ * SCDC Present
+ * Supports 16-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding
+ * YCbCr 4:2:0 Capability Map Data Block:
+ * Empty Capability Map
+ * YCbCr 4:2:0 Video Data Block:
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * Checksum: 0xca
+ */
+static const unsigned char test_edid_hdmi_1080p_rgb_yuv_4k_yuv420_dc_max_200mhz[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x34, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x23, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0x20,
+ 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x02, 0x3a, 0x80, 0x18, 0x71, 0x38,
+ 0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0xc0, 0x1c, 0x32, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
+ 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18,
+ 0x55, 0x18, 0x5e, 0x11, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xbb, 0x02, 0x03, 0x29, 0x31,
+ 0x42, 0x90, 0x5f, 0x6c, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x78, 0x28, 0x20,
+ 0x00, 0x00, 0x01, 0x03, 0x6d, 0xd8, 0x5d, 0xc4, 0x01, 0x28, 0x80, 0x07,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0x0f, 0x00, 0x00, 0xe2, 0x0e,
+ 0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xca
+};
+
+/*
+ * Max resolution: 3840x2160@30Hz with RGB, YUV444, YUV422, YUV420
+ * Max BPC: 16 for all modes
+ * Max TMDS clock: 340 MHz
+ *
+ * edid-decode (hex):
+ *
+ * 00 ff ff ff ff ff ff 00 31 d8 34 00 00 00 00 00
+ * ff 23 01 03 80 60 36 78 0f ee 91 a3 54 4c 99 26
+ * 0f 50 54 20 00 00 01 01 01 01 01 01 01 01 01 01
+ * 01 01 01 01 01 01 04 74 00 30 f2 70 5a 80 b0 58
+ * 8a 00 40 84 63 00 00 1e 00 00 00 fc 00 54 65 73
+ * 74 20 45 44 49 44 0a 20 20 20 00 00 00 fd 00 18
+ * 55 18 5e 22 00 0a 20 20 20 20 20 20 00 00 00 10
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 ce
+ *
+ * 02 03 27 31 41 5f 6c 03 0c 00 10 00 78 44 20 00
+ * 00 01 03 6d d8 5d c4 01 44 80 07 00 00 00 00 00
+ * 00 e3 0f 01 00 e1 0e 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 84
+ *
+ * ----------------
+ *
+ * Block 0, Base EDID:
+ * EDID Structure Version & Revision: 1.3
+ * Vendor & Product Identification:
+ * Manufacturer: LNX
+ * Model: 52
+ * Model year: 2025
+ * Basic Display Parameters & Features:
+ * Digital display
+ * Maximum image size: 96 cm x 54 cm
+ * Gamma: 2.20
+ * RGB color display
+ * Default (sRGB) color space is primary color space
+ * First detailed timing is the preferred timing
+ * Supports GTF timings within operating range
+ * Color Characteristics:
+ * Red : 0.6396, 0.3300
+ * Green: 0.2998, 0.5996
+ * Blue : 0.1503, 0.0595
+ * White: 0.3125, 0.3291
+ * Established Timings I & II:
+ * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz
+ * Standard Timings: none
+ * Detailed Timing Descriptors:
+ * DTD 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz (1600 mm x 900 mm)
+ * Hfront 176 Hsync 88 Hback 296 Hpol P
+ * Vfront 8 Vsync 10 Vback 72 Vpol P
+ * Display Product Name: 'Test EDID'
+ * Display Range Limits:
+ * Monitor ranges (GTF): 24-85 Hz V, 24-94 kHz H, max dotclock 340 MHz
+ * Dummy Descriptor:
+ * Extension blocks: 1
+ * Checksum: 0xce
+ *
+ * ----------------
+ *
+ * Block 1, CTA-861 Extension Block:
+ * Revision: 3
+ * Supports YCbCr 4:4:4
+ * Supports YCbCr 4:2:2
+ * Native detailed modes: 1
+ * Video Data Block:
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * Vendor-Specific Data Block (HDMI), OUI 00-0C-03:
+ * Source physical address: 1.0.0.0
+ * DC_48bit
+ * DC_36bit
+ * DC_30bit
+ * DC_Y444
+ * Maximum TMDS clock: 340 MHz
+ * Extended HDMI video details:
+ * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8:
+ * Version: 1
+ * Maximum TMDS Character Rate: 340 MHz
+ * SCDC Present
+ * Supports 16-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding
+ * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding
+ * YCbCr 4:2:0 Capability Map Data Block:
+ * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz
+ * YCbCr 4:2:0 Video Data Block:
+ * Checksum: 0x84
+ */
+static const unsigned char test_edid_hdmi_4k_rgb_yuv420_dc_max_340mhz[] = {
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x34, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xff, 0x23, 0x01, 0x03, 0x80, 0x60, 0x36, 0x78,
+ 0x0f, 0xee, 0x91, 0xa3, 0x54, 0x4c, 0x99, 0x26, 0x0f, 0x50, 0x54, 0x20,
+ 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x74, 0x00, 0x30, 0xf2, 0x70,
+ 0x5a, 0x80, 0xb0, 0x58, 0x8a, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e,
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44,
+ 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18,
+ 0x55, 0x18, 0x5e, 0x22, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xce, 0x02, 0x03, 0x27, 0x31,
+ 0x41, 0x5f, 0x6c, 0x03, 0x0c, 0x00, 0x10, 0x00, 0x78, 0x44, 0x20, 0x00,
+ 0x00, 0x01, 0x03, 0x6d, 0xd8, 0x5d, 0xc4, 0x01, 0x44, 0x80, 0x07, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe3, 0x0f, 0x01, 0x00, 0xe1, 0x0e, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x84
+};
+
#endif // DRM_KUNIT_EDID_H_
diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c
index 5f7257840d8e..04edb6079c0d 100644
--- a/drivers/gpu/drm/tests/drm_kunit_helpers.c
+++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c
@@ -13,6 +13,7 @@
#include <kunit/resource.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#define KUNIT_DEVICE_NAME "drm-kunit-mock-device"
diff --git a/drivers/gpu/drm/tests/drm_sysfb_modeset_test.c b/drivers/gpu/drm/tests/drm_sysfb_modeset_test.c
new file mode 100644
index 000000000000..e875d876118f
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_sysfb_modeset_test.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include "../sysfb/drm_sysfb_helper.h"
+
+#define TEST_BUF_SIZE 50
+
+struct sysfb_build_fourcc_list_case {
+ const char *name;
+ u32 native_fourccs[TEST_BUF_SIZE];
+ size_t native_fourccs_size;
+ u32 expected[TEST_BUF_SIZE];
+ size_t expected_fourccs_size;
+};
+
+static struct sysfb_build_fourcc_list_case sysfb_build_fourcc_list_cases[] = {
+ {
+ .name = "no native formats",
+ .native_fourccs = { },
+ .native_fourccs_size = 0,
+ .expected = { DRM_FORMAT_XRGB8888 },
+ .expected_fourccs_size = 1,
+ },
+ {
+ .name = "XRGB8888 as native format",
+ .native_fourccs = { DRM_FORMAT_XRGB8888 },
+ .native_fourccs_size = 1,
+ .expected = { DRM_FORMAT_XRGB8888 },
+ .expected_fourccs_size = 1,
+ },
+ {
+ .name = "remove duplicates",
+ .native_fourccs = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ },
+ .native_fourccs_size = 11,
+ .expected = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ },
+ .expected_fourccs_size = 3,
+ },
+ {
+ .name = "convert alpha formats",
+ .native_fourccs = {
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_ARGB2101010,
+ DRM_FORMAT_ABGR2101010,
+ DRM_FORMAT_RGBA1010102,
+ DRM_FORMAT_BGRA1010102,
+ },
+ .native_fourccs_size = 12,
+ .expected = {
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_RGBX1010102,
+ DRM_FORMAT_BGRX1010102,
+ },
+ .expected_fourccs_size = 12,
+ },
+ {
+ .name = "random formats",
+ .native_fourccs = {
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_C8,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGR565_A8,
+ DRM_FORMAT_R10,
+ DRM_FORMAT_XYUV8888,
+ },
+ .native_fourccs_size = 10,
+ .expected = {
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_C8,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGR565_A8,
+ DRM_FORMAT_R10,
+ DRM_FORMAT_XYUV8888,
+ DRM_FORMAT_XRGB8888,
+ },
+ .expected_fourccs_size = 10,
+ },
+};
+
+static void sysfb_build_fourcc_list_case_desc(struct sysfb_build_fourcc_list_case *t, char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(sysfb_build_fourcc_list, sysfb_build_fourcc_list_cases,
+ sysfb_build_fourcc_list_case_desc);
+
+static void drm_test_sysfb_build_fourcc_list(struct kunit *test)
+{
+ const struct sysfb_build_fourcc_list_case *params = test->param_value;
+ u32 fourccs_out[TEST_BUF_SIZE] = {0};
+ size_t nfourccs_out;
+ struct drm_device *drm;
+ struct device *dev;
+
+ dev = drm_kunit_helper_alloc_device(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+ drm = __drm_kunit_helper_alloc_drm_device(test, dev, sizeof(*drm), 0, DRIVER_MODESET);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, drm);
+
+ nfourccs_out = drm_sysfb_build_fourcc_list(drm, params->native_fourccs,
+ params->native_fourccs_size,
+ fourccs_out, TEST_BUF_SIZE);
+
+ KUNIT_EXPECT_EQ(test, nfourccs_out, params->expected_fourccs_size);
+ KUNIT_EXPECT_MEMEQ(test, fourccs_out, params->expected, TEST_BUF_SIZE);
+}
+
+static struct kunit_case drm_sysfb_modeset_test_cases[] = {
+ KUNIT_CASE_PARAM(drm_test_sysfb_build_fourcc_list, sysfb_build_fourcc_list_gen_params),
+ {}
+};
+
+static struct kunit_suite drm_sysfb_modeset_test_suite = {
+ .name = "drm_sysfb_modeset_test",
+ .test_cases = drm_sysfb_modeset_test_cases,
+};
+
+kunit_test_suite(drm_sysfb_modeset_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for the drm_sysfb_modeset APIs");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("José Expósito <jose.exposito89@gmail.com>");
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index 94f8e3178df5..a2f40a5c7703 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -130,7 +130,7 @@ static void tidss_crtc_position_planes(struct tidss_device *tidss,
!to_tidss_crtc_state(cstate)->plane_pos_changed)
return;
- for (layer = 0; layer < tidss->feat->num_planes; layer++) {
+ for (layer = 0; layer < tidss->feat->num_vids; layer++) {
struct drm_plane_state *pstate;
struct drm_plane *plane;
bool layer_active = false;
@@ -271,7 +271,7 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
* another videoport, the DSS will report sync lost issues. Disable all
* the layers here as a work-around.
*/
- for (u32 layer = 0; layer < tidss->feat->num_planes; layer++)
+ for (u32 layer = 0; layer < tidss->feat->num_vids; layer++)
dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer,
false);
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index cacb5f3d8085..21363ccbd763 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -103,9 +103,16 @@ const struct dispc_features dispc_k2g_feats = {
},
},
- .num_planes = 1,
- .vid_name = { "vid1" },
- .vid_lite = { false },
+ .num_vids = 1,
+
+ .vid_info = {
+ {
+ .name = "vid1",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ },
+
.vid_order = { 0 },
};
@@ -178,11 +185,22 @@ const struct dispc_features dispc_am65x_feats = {
},
},
- .num_planes = 2,
+ .num_vids = 2,
/* note: vid is plane_id 0 and vidl1 is plane_id 1 */
- .vid_name = { "vid", "vidl1" },
- .vid_lite = { false, true, },
- .vid_order = { 1, 0 },
+ .vid_info = {
+ {
+ .name = "vid",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ },
+ },
+
+ .vid_order = { 1, 0 },
};
static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
@@ -267,9 +285,32 @@ const struct dispc_features dispc_j721e_feats = {
.gamma_type = TIDSS_GAMMA_10BIT,
},
},
- .num_planes = 4,
- .vid_name = { "vid1", "vidl1", "vid2", "vidl2" },
- .vid_lite = { 0, 1, 0, 1, },
+
+ .num_vids = 4,
+
+ .vid_info = {
+ {
+ .name = "vid1",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ },
+ {
+ .name = "vid2",
+ .is_lite = false,
+ .hw_id = 2,
+ },
+ {
+ .name = "vidl2",
+ .is_lite = true,
+ .hw_id = 3,
+ },
+ },
+
.vid_order = { 1, 3, 0, 2 },
};
@@ -315,11 +356,23 @@ const struct dispc_features dispc_am625_feats = {
},
},
- .num_planes = 2,
+ .num_vids = 2,
+
/* note: vid is plane_id 0 and vidl1 is plane_id 1 */
- .vid_name = { "vid", "vidl1" },
- .vid_lite = { false, true, },
- .vid_order = { 1, 0 },
+ .vid_info = {
+ {
+ .name = "vid",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ }
+ },
+
+ .vid_order = { 1, 0 },
};
const struct dispc_features dispc_am62a7_feats = {
@@ -369,11 +422,58 @@ const struct dispc_features dispc_am62a7_feats = {
},
},
- .num_planes = 2,
- /* note: vid is plane_id 0 and vidl1 is plane_id 1 */
- .vid_name = { "vid", "vidl1" },
- .vid_lite = { false, true, },
- .vid_order = { 1, 0 },
+ .num_vids = 2,
+
+ .vid_info = {
+ {
+ .name = "vid",
+ .is_lite = false,
+ .hw_id = 0,
+ },
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ }
+ },
+
+ .vid_order = { 1, 0 },
+};
+
+const struct dispc_features dispc_am62l_feats = {
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 165000,
+ },
+
+ .subrev = DISPC_AM62L,
+
+ .common = "common",
+ .common_regs = tidss_am65x_common_regs,
+
+ .num_vps = 1,
+ .vp_name = { "vp1" },
+ .ovr_name = { "ovr1" },
+ .vpclk_name = { "vp1" },
+ .vp_bus_type = { DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_vids = 1,
+
+ .vid_info = {
+ {
+ .name = "vidl1",
+ .is_lite = true,
+ .hw_id = 1,
+ }
+ },
+
+ .vid_order = { 0 },
};
static const u16 *dispc_common_regmap;
@@ -734,7 +834,8 @@ static void dispc_k3_vp_write_irqstatus(struct dispc_device *dispc,
static dispc_irq_t dispc_k3_vid_read_irqstatus(struct dispc_device *dispc,
u32 hw_plane)
{
- u32 stat = dispc_read(dispc, DISPC_VID_IRQSTATUS(hw_plane));
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQSTATUS(hw_id));
return dispc_vid_irq_from_raw(stat, hw_plane);
}
@@ -742,9 +843,10 @@ static dispc_irq_t dispc_k3_vid_read_irqstatus(struct dispc_device *dispc,
static void dispc_k3_vid_write_irqstatus(struct dispc_device *dispc,
u32 hw_plane, dispc_irq_t vidstat)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
- dispc_write(dispc, DISPC_VID_IRQSTATUS(hw_plane), stat);
+ dispc_write(dispc, DISPC_VID_IRQSTATUS(hw_id), stat);
}
static dispc_irq_t dispc_k3_vp_read_irqenable(struct dispc_device *dispc,
@@ -766,7 +868,8 @@ static void dispc_k3_vp_set_irqenable(struct dispc_device *dispc,
static dispc_irq_t dispc_k3_vid_read_irqenable(struct dispc_device *dispc,
u32 hw_plane)
{
- u32 stat = dispc_read(dispc, DISPC_VID_IRQENABLE(hw_plane));
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQENABLE(hw_id));
return dispc_vid_irq_from_raw(stat, hw_plane);
}
@@ -774,9 +877,10 @@ static dispc_irq_t dispc_k3_vid_read_irqenable(struct dispc_device *dispc,
static void dispc_k3_vid_set_irqenable(struct dispc_device *dispc,
u32 hw_plane, dispc_irq_t vidstat)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
- dispc_write(dispc, DISPC_VID_IRQENABLE(hw_plane), stat);
+ dispc_write(dispc, DISPC_VID_IRQENABLE(hw_id), stat);
}
static
@@ -788,7 +892,8 @@ void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
if (clearmask & DSS_IRQ_VP_MASK(i))
dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
}
- for (i = 0; i < dispc->feat->num_planes; ++i) {
+
+ for (i = 0; i < dispc->feat->num_vids; ++i) {
if (clearmask & DSS_IRQ_PLANE_MASK(i))
dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
}
@@ -809,7 +914,7 @@ dispc_irq_t dispc_k3_read_and_clear_irqstatus(struct dispc_device *dispc)
for (i = 0; i < dispc->feat->num_vps; ++i)
status |= dispc_k3_vp_read_irqstatus(dispc, i);
- for (i = 0; i < dispc->feat->num_planes; ++i)
+ for (i = 0; i < dispc->feat->num_vids; ++i)
status |= dispc_k3_vid_read_irqstatus(dispc, i);
dispc_k3_clear_irqstatus(dispc, status);
@@ -825,7 +930,7 @@ static dispc_irq_t dispc_k3_read_irqenable(struct dispc_device *dispc)
for (i = 0; i < dispc->feat->num_vps; ++i)
enable |= dispc_k3_vp_read_irqenable(dispc, i);
- for (i = 0; i < dispc->feat->num_planes; ++i)
+ for (i = 0; i < dispc->feat->num_vids; ++i)
enable |= dispc_k3_vid_read_irqenable(dispc, i);
return enable;
@@ -851,12 +956,15 @@ static void dispc_k3_set_irqenable(struct dispc_device *dispc,
main_disable |= BIT(i); /* VP IRQ */
}
- for (i = 0; i < dispc->feat->num_planes; ++i) {
+ for (i = 0; i < dispc->feat->num_vids; ++i) {
+ u32 hw_id = dispc->feat->vid_info[i].hw_id;
+
dispc_k3_vid_set_irqenable(dispc, i, mask);
+
if (mask & DSS_IRQ_PLANE_MASK(i))
- main_enable |= BIT(i + 4); /* VID IRQ */
+ main_enable |= BIT(hw_id + 4); /* VID IRQ */
else
- main_disable |= BIT(i + 4); /* VID IRQ */
+ main_disable |= BIT(hw_id + 4); /* VID IRQ */
}
if (main_enable)
@@ -879,6 +987,7 @@ dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
return dispc_k2g_read_and_clear_irqstatus(dispc);
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
case DISPC_J721E:
return dispc_k3_read_and_clear_irqstatus(dispc);
@@ -896,6 +1005,7 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_set_irqenable(dispc, mask);
@@ -1358,8 +1468,10 @@ static void dispc_am65x_ovr_set_plane(struct dispc_device *dispc,
u32 hw_plane, u32 hw_videoport,
u32 x, u32 y, u32 layer)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- hw_plane, 4, 1);
+ hw_id, 4, 1);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
x, 17, 6);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
@@ -1370,8 +1482,10 @@ static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc,
u32 hw_plane, u32 hw_videoport,
u32 x, u32 y, u32 layer)
{
+ u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id;
+
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
- hw_plane, 4, 1);
+ hw_id, 4, 1);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
x, 13, 0);
OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
@@ -1388,6 +1502,7 @@ void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
x, y, layer);
@@ -2025,7 +2140,7 @@ int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
const struct drm_plane_state *state,
u32 hw_videoport)
{
- bool lite = dispc->feat->vid_lite[hw_plane];
+ bool lite = dispc->feat->vid_info[hw_plane].is_lite;
u32 fourcc = state->fb->format->format;
bool need_scaling = state->src_w >> 16 != state->crtc_w ||
state->src_h >> 16 != state->crtc_h;
@@ -2096,7 +2211,7 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
const struct drm_plane_state *state,
u32 hw_videoport)
{
- bool lite = dispc->feat->vid_lite[hw_plane];
+ bool lite = dispc->feat->vid_info[hw_plane].is_lite;
u32 fourcc = state->fb->format->format;
u16 cpp = state->fb->format->cpp[0];
u32 fb_width = state->fb->pitches[0] / cpp;
@@ -2210,7 +2325,7 @@ static void dispc_k2g_plane_init(struct dispc_device *dispc)
/* MFLAG_START = MFLAGNORMALSTARTMODE */
REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
- for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) {
u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
u32 thr_low, thr_high;
u32 mflag_low, mflag_high;
@@ -2226,7 +2341,7 @@ static void dispc_k2g_plane_init(struct dispc_device *dispc)
dev_dbg(dispc->dev,
"%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
- dispc->feat->vid_name[hw_plane],
+ dispc->feat->vid_info[hw_plane].name,
size,
thr_high, thr_low,
mflag_high, mflag_low,
@@ -2265,7 +2380,7 @@ static void dispc_k3_plane_init(struct dispc_device *dispc)
/* MFLAG_START = MFLAGNORMALSTARTMODE */
REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
- for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) {
u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
u32 thr_low, thr_high;
u32 mflag_low, mflag_high;
@@ -2281,7 +2396,7 @@ static void dispc_k3_plane_init(struct dispc_device *dispc)
dev_dbg(dispc->dev,
"%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
- dispc->feat->vid_name[hw_plane],
+ dispc->feat->vid_info[hw_plane].name,
size,
thr_high, thr_low,
mflag_high, mflag_low,
@@ -2308,6 +2423,7 @@ static void dispc_plane_init(struct dispc_device *dispc)
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
case DISPC_J721E:
dispc_k3_plane_init(dispc);
@@ -2416,6 +2532,7 @@ static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
break;
case DISPC_AM625:
case DISPC_AM62A7:
+ case DISPC_AM62L:
case DISPC_AM65X:
dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
break;
@@ -2898,8 +3015,8 @@ int dispc_init(struct tidss_device *tidss)
if (r)
return r;
- for (i = 0; i < dispc->feat->num_planes; i++) {
- r = dispc_iomap_resource(pdev, dispc->feat->vid_name[i],
+ for (i = 0; i < dispc->feat->num_vids; i++) {
+ r = dispc_iomap_resource(pdev, dispc->feat->vid_info[i].name,
&dispc->base_vid[i]);
if (r)
return r;
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index 086327d51a90..28958514b8f5 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -46,6 +46,12 @@ struct dispc_features_scaling {
u32 xinc_max;
};
+struct dispc_vid_info {
+ const char *name; /* Should match dt reg names */
+ u32 hw_id; /* Hardware VID pipeline instance backing this plane */
+ bool is_lite; /* Lite (VIDL) pipeline without scaling */
+};
+
struct dispc_errata {
bool i2000; /* DSS Does Not Support YUV Pixel Data Formats */
};
@@ -61,6 +67,7 @@ enum dispc_vp_bus_type {
enum dispc_dss_subrevision {
DISPC_K2G,
DISPC_AM625,
+ DISPC_AM62L,
DISPC_AM62A7,
DISPC_AM65X,
DISPC_J721E,
@@ -82,15 +89,15 @@ struct dispc_features {
const char *vpclk_name[TIDSS_MAX_PORTS]; /* Should match dt clk names */
const enum dispc_vp_bus_type vp_bus_type[TIDSS_MAX_PORTS];
struct tidss_vp_feat vp_feat;
- u32 num_planes;
- const char *vid_name[TIDSS_MAX_PLANES]; /* Should match dt reg names */
- bool vid_lite[TIDSS_MAX_PLANES];
+ u32 num_vids;
+ struct dispc_vid_info vid_info[TIDSS_MAX_PLANES];
u32 vid_order[TIDSS_MAX_PLANES];
};
extern const struct dispc_features dispc_k2g_feats;
extern const struct dispc_features dispc_am625_feats;
extern const struct dispc_features dispc_am62a7_feats;
+extern const struct dispc_features dispc_am62l_feats;
extern const struct dispc_features dispc_am65x_feats;
extern const struct dispc_features dispc_j721e_feats;
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index d4652e8cc28c..f2a4f659f574 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -242,6 +242,7 @@ static const struct of_device_id tidss_of_table[] = {
{ .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
{ .compatible = "ti,am625-dss", .data = &dispc_am625_feats, },
{ .compatible = "ti,am62a7-dss", .data = &dispc_am62a7_feats, },
+ { .compatible = "ti,am62l-dss", .data = &dispc_am62l_feats, },
{ .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
{ .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
{ }
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index f371518f8697..19432c08ec6b 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -115,7 +115,7 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss)
const struct dispc_features *feat = tidss->feat;
u32 max_vps = feat->num_vps;
- u32 max_planes = feat->num_planes;
+ u32 max_planes = feat->num_vids;
struct pipe pipes[TIDSS_MAX_PORTS];
u32 num_pipes = 0;
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index 719412e6c346..142ae81951a0 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -200,7 +200,7 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
struct tidss_plane *tplane;
enum drm_plane_type type;
u32 possible_crtcs;
- u32 num_planes = tidss->feat->num_planes;
+ u32 num_planes = tidss->feat->num_vids;
u32 color_encodings = (BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709));
u32 color_ranges = (BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
diff --git a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
index b91c13f46225..7aaf0d1395ff 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_kunit_helpers.c
@@ -2,6 +2,9 @@
/*
* Copyright © 2023 Intel Corporation
*/
+
+#include <linux/export.h>
+
#include <drm/ttm/ttm_tt.h>
#include "ttm_kunit_helpers.h"
diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
index f6d1c8a2845d..d7eb6471f2ed 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c
@@ -2,6 +2,9 @@
/*
* Copyright © 2023 Intel Corporation
*/
+
+#include <linux/export.h>
+
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index d27691f2e451..fca0a1a3c6fd 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -36,6 +36,7 @@
#include <drm/ttm/ttm_tt.h>
#include <drm/ttm/ttm_resource.h>
#include <linux/agp_backend.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c
index ffaab68bd5dd..cb1b8e5dadf5 100644
--- a/drivers/gpu/drm/ttm/ttm_backup.c
+++ b/drivers/gpu/drm/ttm/ttm_backup.c
@@ -4,6 +4,8 @@
*/
#include <drm/ttm/ttm_backup.h>
+
+#include <linux/export.h>
#include <linux/page-flags.h>
#include <linux/swap.h>
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 08a23ab037cb..bb9c5c8e16b5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -35,6 +35,7 @@
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
+#include <linux/export.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
@@ -46,6 +47,7 @@
#include <linux/dma-resv.h>
#include "ttm_module.h"
+#include "ttm_bo_internal.h"
static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_internal.h b/drivers/gpu/drm/ttm/ttm_bo_internal.h
new file mode 100644
index 000000000000..9d8b747a34db
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo_internal.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+
+#ifndef _TTM_BO_INTERNAL_H_
+#define _TTM_BO_INTERNAL_H_
+
+#include <drm/ttm/ttm_bo.h>
+
+/**
+ * ttm_bo_get - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ */
+static inline void ttm_bo_get(struct ttm_buffer_object *bo)
+{
+ kref_get(&bo->kref);
+}
+
+/**
+ * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
+ * its refcount has already reached zero.
+ * @bo: The buffer object.
+ *
+ * Used to reference a TTM buffer object in lookups where the object is removed
+ * from the lookup structure during the destructor and for RCU lookups.
+ *
+ * Returns: @bo if the referencing was successful, NULL otherwise.
+ */
+static inline __must_check struct ttm_buffer_object *
+ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
+{
+ if (!kref_get_unless_zero(&bo->kref))
+ return NULL;
+ return bo;
+}
+
+#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 15cab9bda17f..b9a772b26fa1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -28,6 +28,8 @@
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
+
+#include <linux/export.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
@@ -37,6 +39,8 @@
#include <drm/drm_cache.h>
+#include "ttm_bo_internal.h"
+
struct ttm_transfer_obj {
struct ttm_buffer_object base;
struct ttm_buffer_object *bo;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index bdfa6ecfef05..b47020fca199 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -31,6 +31,8 @@
#define pr_fmt(fmt) "[TTM] " fmt
+#include <linux/export.h>
+
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 02e797fd1891..816e2cba6016 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -28,6 +28,7 @@
#define pr_fmt(fmt) "[TTM DEVICE] " fmt
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <drm/ttm/ttm_bo.h>
@@ -36,6 +37,7 @@
#include <drm/ttm/ttm_placement.h>
#include "ttm_module.h"
+#include "ttm_bo_internal.h"
/*
* ttm_global_mutex - protecting the global state
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index f1c60fa80c2d..bc7a83a9fe44 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -26,6 +26,8 @@
*
**************************************************************************/
+#include <linux/export.h>
+
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_bo.h>
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index c2ea865be657..baf27c70a419 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -31,6 +31,7 @@
* cause they are rather slow compared to alloc_pages+map.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
@@ -1132,7 +1133,9 @@ void ttm_pool_fini(struct ttm_pool *pool)
}
EXPORT_SYMBOL(ttm_pool_fini);
-/* As long as pages are available make sure to release at least one */
+/* Free average pool number of pages. */
+#define TTM_SHRINKER_BATCH ((1 << (MAX_PAGE_ORDER / 2)) * NR_PAGE_ORDERS)
+
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
struct shrink_control *sc)
{
@@ -1140,9 +1143,12 @@ static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
do
num_freed += ttm_pool_shrink();
- while (!num_freed && atomic_long_read(&allocated_pages));
+ while (num_freed < sc->nr_to_scan &&
+ atomic_long_read(&allocated_pages));
+
+ sc->nr_scanned = num_freed;
- return num_freed;
+ return num_freed ?: SHRINK_STOP;
}
/* Return the number of pages available or SHRINK_EMPTY if we have none */
@@ -1233,7 +1239,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
unsigned int i;
- if (!pool->use_dma_alloc) {
+ if (!pool->use_dma_alloc && pool->nid == NUMA_NO_NODE) {
seq_puts(m, "unused\n");
return 0;
}
@@ -1242,7 +1248,12 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
spin_lock(&shrinker_lock);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
- seq_puts(m, "DMA ");
+ if (!ttm_pool_select_type(pool, i, 0))
+ continue;
+ if (pool->use_dma_alloc)
+ seq_puts(m, "DMA ");
+ else
+ seq_printf(m, "N%d ", pool->nid);
switch (i) {
case ttm_cached:
seq_puts(m, "\t:");
@@ -1266,10 +1277,15 @@ EXPORT_SYMBOL(ttm_pool_debugfs);
/* Test the shrinker functions and dump the result */
static int ttm_pool_debugfs_shrink_show(struct seq_file *m, void *data)
{
- struct shrink_control sc = { .gfp_mask = GFP_NOFS };
+ struct shrink_control sc = {
+ .gfp_mask = GFP_NOFS,
+ .nr_to_scan = TTM_SHRINKER_BATCH,
+ };
+ unsigned long count;
fs_reclaim_acquire(GFP_KERNEL);
- seq_printf(m, "%lu/%lu\n", ttm_pool_shrinker_count(mm_shrinker, &sc),
+ count = ttm_pool_shrinker_count(mm_shrinker, &sc);
+ seq_printf(m, "%lu/%lu\n", count,
ttm_pool_shrinker_scan(mm_shrinker, &sc));
fs_reclaim_release(GFP_KERNEL);
@@ -1324,6 +1340,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
mm_shrinker->count_objects = ttm_pool_shrinker_count;
mm_shrinker->scan_objects = ttm_pool_shrinker_scan;
+ mm_shrinker->batch = TTM_SHRINKER_BATCH;
mm_shrinker->seeks = 1;
shrinker_register(mm_shrinker);
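
Note (illustrative, not part of this series): to make the new batch size concrete, assume MAX_PAGE_ORDER == 10 and NR_PAGE_ORDERS == 11 (typical x86-64 values, used here only as an example). Then TTM_SHRINKER_BATCH = (1 << (10 / 2)) * 11 = 32 * 11 = 352 pages, so one shrinker_scan call keeps freeing until roughly a pool's worth of pages is gone (or nothing is left) instead of stopping after the first release, and sc->nr_scanned now reports the work actually done back to the MM core.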
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index ae11d07eb63a..db854b581d83 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -34,6 +34,8 @@
#include <drm/ttm/ttm_range_manager.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/drm_mm.h>
+
+#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 769b0ca9be47..e2c82ad07eb4 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -23,6 +23,7 @@
*/
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/io-mapping.h>
#include <linux/iosys-map.h>
#include <linux/scatterlist.h>
@@ -557,6 +558,9 @@ int ttm_resource_manager_evict_all(struct ttm_device *bdev,
cond_resched();
} while (!ret);
+ if (ret && ret != -ENOENT)
+ return ret;
+
spin_lock(&man->move_lock);
fence = dma_fence_get(man->move);
spin_unlock(&man->move_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 698cd4bf5e46..506e257dfba8 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -33,6 +33,7 @@
#include <linux/cc_platform.h>
#include <linux/debugfs.h>
+#include <linux/export.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/sched.h>
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 1922988625eb..ce5ae7cacb90 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -57,7 +57,7 @@ static const struct drm_driver driver = {
/* GEM hooks */
.fops = &udl_driver_fops,
- DRM_GEM_SHMEM_DRIVER_OPS,
+ DRM_GEM_SHMEM_DRIVER_OPS_NO_MAP_SGT,
DRM_FBDEV_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c
index 4ff5de46fb22..5171ffe9012d 100644
--- a/drivers/gpu/drm/v3d/v3d_submit.c
+++ b/drivers/gpu/drm/v3d/v3d_submit.c
@@ -169,7 +169,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
job->file = file_priv;
ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
- 1, v3d_priv);
+ 1, v3d_priv, file_priv->client_id);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index a29a6ef266f9..07c91b450f93 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -562,7 +562,7 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
ret = drm_connector_hdmi_audio_init(connector, dev->dev,
&vc4_hdmi_audio_funcs,
- 8, false, -1);
+ 8, 0, false, -1);
if (ret)
return ret;
@@ -2291,6 +2291,12 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
return ret;
}
+ ret = drm_connector_hdmi_audio_init(&vc4_hdmi->connector, dev,
+ &vc4_hdmi_audio_funcs, 8, 0, false,
+ -1);
+ if (ret)
+ return ret;
+
dai_link->cpus = &vc4_hdmi->audio.cpu;
dai_link->codecs = &vc4_hdmi->audio.codec;
dai_link->platforms = &vc4_hdmi->audio.platform;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index f17660a71a3e..f7def8b42068 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -88,6 +88,7 @@ struct virtio_gpu_object_params {
struct virtio_gpu_object {
struct drm_gem_shmem_object base;
+ struct dma_buf *dma_buf;
struct sg_table *sgt;
uint32_t hw_res_handle;
bool dumb;
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index 1118a0250279..722cde5e2d86 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -206,7 +206,7 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj)
struct virtio_gpu_device *vgdev = obj->dev->dev_private;
if (drm_gem_is_imported(obj)) {
- struct dma_buf *dmabuf = obj->dma_buf;
+ struct dma_buf *dmabuf = bo->dma_buf;
dma_resv_lock(dmabuf->resv, NULL);
virtgpu_dma_buf_unmap(bo);
@@ -332,6 +332,7 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev,
obj->import_attach = attach;
get_dma_buf(buf);
+ bo->dma_buf = buf;
ret = virtgpu_dma_buf_init_obj(dev, bo, attach);
if (ret < 0)
diff --git a/drivers/gpu/drm/vkms/tests/Makefile b/drivers/gpu/drm/vkms/tests/Makefile
index 9ded37b67a46..5750f0bd9d40 100644
--- a/drivers/gpu/drm/vkms/tests/Makefile
+++ b/drivers/gpu/drm/vkms/tests/Makefile
@@ -1,3 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms_config_test.o
+vkms-kunit-tests-y := \
+ vkms_config_test.o \
+ vkms_format_test.o
+
+obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms-kunit-tests.o
diff --git a/drivers/gpu/drm/vkms/tests/vkms_format_test.c b/drivers/gpu/drm/vkms/tests/vkms_format_test.c
new file mode 100644
index 000000000000..2e1daef94831
--- /dev/null
+++ b/drivers/gpu/drm/vkms/tests/vkms_format_test.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include <drm/drm_fixed.h>
+#include <drm/drm_fourcc.h>
+
+#include "../../drm_crtc_internal.h"
+
+#include "../vkms_formats.h"
+
+#define TEST_BUFF_SIZE 50
+
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
+
+/**
+ * struct pixel_yuv_u8 - Internal representation of a pixel color.
+ * @y: Luma value, stored in 8 bits, without padding, using
+ * machine endianness
+ * @u: Blue difference chroma value, stored in 8 bits, without padding, using
+ * machine endianness
+ * @v: Red difference chroma value, stored in 8 bits, without padding, using
+ * machine endianness
+ */
+struct pixel_yuv_u8 {
+ u8 y, u, v;
+};
+
+/*
+ * struct yuv_u8_to_argb_u16_case - Reference values to test the color
+ * conversions in VKMS from YUV to ARGB
+ *
+ * @encoding: Encoding used to convert RGB to YUV
+ * @range: Range used to convert RGB to YUV
+ * @n_colors: Count of test colors in this case
+ * @format_pair.name: Name used for this color conversion, used to
+ * clarify the test results
+ * @format_pair.argb: ARGB color tested
+ * @format_pair.yuv: Same color as @format_pair.argb, but converted to
+ * YUV using @encoding and @range.
+ */
+struct yuv_u8_to_argb_u16_case {
+ enum drm_color_encoding encoding;
+ enum drm_color_range range;
+ size_t n_colors;
+ struct format_pair {
+ char *name;
+ struct pixel_yuv_u8 yuv;
+ struct pixel_argb_u16 argb;
+ } colors[TEST_BUFF_SIZE];
+};
+
+/*
+ * The YUV color representations were acquired via the colour python framework.
+ * Below are the function calls used for generating each case.
+ *
+ * For more information, go to the docs:
+ * https://colour.readthedocs.io/en/master/generated/colour.RGB_to_YCbCr.html
+ */
+static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = {
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = False,
+ * out_int = True)
+ *
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT601 full range using the ITU-R BT.601 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT601,
+ .range = DRM_COLOR_YCBCR_FULL_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4c, 0x55, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0x96, 0x2c, 0x15 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1d, 0xff, 0x6b }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = True,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT601 limited range using the ITU-R BT.601 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT601,
+ .range = DRM_COLOR_YCBCR_LIMITED_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x51, 0x5a, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0x91, 0x36, 0x22 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x29, 0xf0, 0x6e }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = False,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT709 full range using the ITU-R BT.709 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT709,
+ .range = DRM_COLOR_YCBCR_FULL_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x36, 0x63, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xb6, 0x1e, 0x0c }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x12, 0xff, 0x74 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = True,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT709 limited range using the ITU-R BT.709 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT709,
+ .range = DRM_COLOR_YCBCR_LIMITED_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x3f, 0x66, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xad, 0x2a, 0x1a }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x20, 0xf0, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = False,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT2020 full range using the ITU-R BT.2020 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT2020,
+ .range = DRM_COLOR_YCBCR_FULL_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x43, 0x5c, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xad, 0x24, 0x0b }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x0f, 0xff, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+ /*
+ * colour.RGB_to_YCbCr(<rgb color in 16 bit form>,
+ * K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * in_bits = 16,
+ * in_legal = False,
+ * in_int = True,
+ * out_bits = 8,
+ * out_legal = True,
+ * out_int = True)
+ * Test cases for color conversion generated by converting RGB
+ * values to YUV BT2020 limited range using the ITU-R BT.2020 weights.
+ */
+ {
+ .encoding = DRM_COLOR_YCBCR_BT2020,
+ .range = DRM_COLOR_YCBCR_LIMITED_RANGE,
+ .n_colors = 6,
+ .colors = {
+ { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }},
+ { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }},
+ { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }},
+ { "red", { 0x4a, 0x61, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }},
+ { "green", { 0xa4, 0x2f, 0x19 }, { 0xffff, 0x0000, 0xffff, 0x0000 }},
+ { "blue", { 0x1d, 0xf0, 0x77 }, { 0xffff, 0x0000, 0x0000, 0xffff }},
+ },
+ },
+};
+
+/*
+ * vkms_format_test_yuv_u8_to_argb_u16 - Testing the conversion from YUV
+ * colors to ARGB colors in VKMS
+ *
+ * This test will use the functions get_conversion_matrix_to_argb_u16 and
+ * argb_u16_from_yuv888 to convert YUV colors (stored in
+ * yuv_u8_to_argb_u16_cases) into ARGB colors.
+ *
+ * The conversion between YUV and RGB is not totally reversible, so there may be
+ * some difference between the expected value and the result.
+ * In addition, there may be some rounding error as the input color is 8 bits
+ * and the output color is 16 bits.
+ */
+static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test)
+{
+ const struct yuv_u8_to_argb_u16_case *param = test->param_value;
+ struct pixel_argb_u16 argb;
+
+ for (size_t i = 0; i < param->n_colors; i++) {
+ const struct format_pair *color = &param->colors[i];
+ struct conversion_matrix matrix;
+
+ get_conversion_matrix_to_argb_u16
+ (DRM_FORMAT_NV12, param->encoding, param->range, &matrix);
+
+ argb = argb_u16_from_yuv888(color->yuv.y, color->yuv.u, color->yuv.v, &matrix);
+
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.a, color->argb.a), 0x1ff,
+ "On the A channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.a, argb.a);
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.r, color->argb.r), 0x1ff,
+ "On the R channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.r, argb.r);
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.g, color->argb.g), 0x1ff,
+ "On the G channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.g, argb.g);
+ KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.b, color->argb.b), 0x1ff,
+ "On the B channel of the color %s expected 0x%04x, got 0x%04x",
+ color->name, color->argb.b, argb.b);
+ }
+}
+
+static void vkms_format_test_yuv_u8_to_argb_u16_case_desc(struct yuv_u8_to_argb_u16_case *t,
+ char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s - %s",
+ drm_get_color_encoding_name(t->encoding), drm_get_color_range_name(t->range));
+}
+
+KUNIT_ARRAY_PARAM(yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_cases,
+ vkms_format_test_yuv_u8_to_argb_u16_case_desc
+);
+
+static struct kunit_case vkms_format_test_cases[] = {
+ KUNIT_CASE_PARAM(vkms_format_test_yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_gen_params),
+ {}
+};
+
+static struct kunit_suite vkms_format_test_suite = {
+ .name = "vkms-format",
+ .test_cases = vkms_format_test_cases,
+};
+
+kunit_test_suite(vkms_format_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Kunit test for vkms format conversion");
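
Note (illustrative, not part of this series): the 0x1ff tolerance in the expectations above reads as "just under two 8-bit steps". The test feeds 8-bit YUV samples, and the conversion widens 8-bit values to 16 bits by multiplying by 257 (0xff * 257 = 0xffff), so one least-significant step of the input corresponds to 257 at the output; 0x1ff = 511 therefore allows a little less than two such steps of combined rounding and matrix error.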
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index a74a7fc3a056..68b5e3fce455 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -45,6 +45,23 @@ struct vkms_frame_info {
unsigned int rotation;
};
+/**
+ * struct pixel_argb_u16 - Internal representation of a pixel color.
+ * @a: Alpha component value, stored in 16 bits, without padding, using
+ * machine endianness
+ * @r: Red component value, stored in 16 bits, without padding, using
+ * machine endianness
+ * @g: Green component value, stored in 16 bits, without padding, using
+ * machine endianness
+ * @b: Blue component value, stored in 16 bits, without padding, using
+ * machine endianness
+ *
+ * The goal of this structure is to keep enough precision to ensure
+ * correct composition results in VKMS and to simplify color
+ * manipulation by splitting each component into its own field.
+ * Caution: the byte ordering of this structure is machine-dependent;
+ * you can't cast it directly to AR48 or xR48.
+ */
struct pixel_argb_u16 {
u16 a, r, g, b;
};
@@ -103,16 +120,34 @@ typedef void (*pixel_read_line_t)(const struct vkms_plane_state *plane, int x_st
struct pixel_argb_u16 out_pixel[]);
/**
+ * struct conversion_matrix - Matrix to use for a specific encoding and range
+ *
+ * @matrix: Conversion matrix from YUV to RGB. The matrix is stored in row-major order and is
+ * used to compute RGB values from YUV values:
+ * [[r],[g],[b]] = @matrix * [[y],[u],[v]]
+ * or, for YVU formats:
+ * [[r],[g],[b]] = @matrix * [[y],[v],[u]]
+ * The values of the matrix are signed fixed-point values with a 32-bit fractional part.
+ * @y_offset: Offset to apply to the y value.
+ */
+struct conversion_matrix {
+ s64 matrix[3][3];
+ int y_offset;
+};
+
+/**
* struct vkms_plane_state - Driver specific plane state
* @base: base plane state
* @frame_info: data required for composing computation
* @pixel_read_line: function to read a pixel line in this plane. The creator of a
* struct vkms_plane_state must ensure that this pointer is valid
+ * @conversion_matrix: matrix used to convert YUV formats to RGB
*/
struct vkms_plane_state {
struct drm_shadow_plane_state base;
struct vkms_frame_info *frame_info;
pixel_read_line_t pixel_read_line;
+ struct conversion_matrix conversion_matrix;
};
struct vkms_plane {
diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c
index 30a64ecca87c..6d0227c6635a 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.c
+++ b/drivers/gpu/drm/vkms/vkms_formats.c
@@ -7,6 +7,8 @@
#include <drm/drm_rect.h>
#include <drm/drm_fixed.h>
+#include <kunit/visibility.h>
+
#include "vkms_formats.h"
/**
@@ -140,6 +142,51 @@ static void packed_pixels_addr_1x1(const struct vkms_frame_info *frame_info,
*addr = (u8 *)frame_info->map[0].vaddr + offset;
}
+/**
+ * get_subsampling() - Get the subsampling divisor value in a specific direction
+ *
+ * @format: format to extract the subsampling from
+ * @direction: direction of the subsampling requested
+ */
+static int get_subsampling(const struct drm_format_info *format,
+ enum pixel_read_direction direction)
+{
+ switch (direction) {
+ case READ_BOTTOM_TO_TOP:
+ case READ_TOP_TO_BOTTOM:
+ return format->vsub;
+ case READ_RIGHT_TO_LEFT:
+ case READ_LEFT_TO_RIGHT:
+ return format->hsub;
+ }
+ WARN_ONCE(true, "Invalid direction for pixel reading: %d\n", direction);
+ return 1;
+}
+
+/**
+ * get_subsampling_offset() - An offset for keeping the chroma siting consistent regardless of
+ * x_start and y_start values
+ *
+ * @direction: direction of the reading to properly compute this offset
+ * @x_start: x coordinate of the starting point of the line being read
+ * @y_start: y coordinate of the starting point of the line being read
+ */
+static int get_subsampling_offset(enum pixel_read_direction direction, int x_start, int y_start)
+{
+ switch (direction) {
+ case READ_BOTTOM_TO_TOP:
+ return -y_start - 1;
+ case READ_TOP_TO_BOTTOM:
+ return y_start;
+ case READ_RIGHT_TO_LEFT:
+ return -x_start - 1;
+ case READ_LEFT_TO_RIGHT:
+ return x_start;
+ }
+ WARN_ONCE(true, "Invalid direction for pixel reading: %d\n", direction);
+ return 0;
+}
+
/*
* The following functions take pixel data (a, r, g, b, pixel, ...) and convert them to
* &struct pixel_argb_u16
@@ -202,11 +249,54 @@ static struct pixel_argb_u16 argb_u16_from_RGB565(const __le16 *pixel)
return out_pixel;
}
+static struct pixel_argb_u16 argb_u16_from_gray8(u8 gray)
+{
+ return argb_u16_from_u8888(255, gray, gray, gray);
+}
+
+static struct pixel_argb_u16 argb_u16_from_grayu16(u16 gray)
+{
+ return argb_u16_from_u16161616(0xFFFF, gray, gray, gray);
+}
+
+VISIBLE_IF_KUNIT struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2,
+ const struct conversion_matrix *matrix)
+{
+ u16 r, g, b;
+ s64 fp_y, fp_channel_1, fp_channel_2;
+ s64 fp_r, fp_g, fp_b;
+
+ fp_y = drm_int2fixp(((int)y - matrix->y_offset) * 257);
+ fp_channel_1 = drm_int2fixp(((int)channel_1 - 128) * 257);
+ fp_channel_2 = drm_int2fixp(((int)channel_2 - 128) * 257);
+
+ fp_r = drm_fixp_mul(matrix->matrix[0][0], fp_y) +
+ drm_fixp_mul(matrix->matrix[0][1], fp_channel_1) +
+ drm_fixp_mul(matrix->matrix[0][2], fp_channel_2);
+ fp_g = drm_fixp_mul(matrix->matrix[1][0], fp_y) +
+ drm_fixp_mul(matrix->matrix[1][1], fp_channel_1) +
+ drm_fixp_mul(matrix->matrix[1][2], fp_channel_2);
+ fp_b = drm_fixp_mul(matrix->matrix[2][0], fp_y) +
+ drm_fixp_mul(matrix->matrix[2][1], fp_channel_1) +
+ drm_fixp_mul(matrix->matrix[2][2], fp_channel_2);
+
+ fp_r = drm_fixp2int_round(fp_r);
+ fp_g = drm_fixp2int_round(fp_g);
+ fp_b = drm_fixp2int_round(fp_b);
+
+ r = clamp(fp_r, 0, 0xffff);
+ g = clamp(fp_g, 0, 0xffff);
+ b = clamp(fp_b, 0, 0xffff);
+
+ return argb_u16_from_u16161616(0xffff, r, g, b);
+}
+EXPORT_SYMBOL_IF_KUNIT(argb_u16_from_yuv888);
+
/*
* The following functions are read_line function for each pixel format supported by VKMS.
*
* They read a line starting at the point @x_start,@y_start following the @direction. The result
- * is stored in @out_pixel and in the format ARGB16161616.
+ * is stored in @out_pixel and in a 64-bit format, see struct pixel_argb_u16.
*
* These functions are very repetitive, but the innermost pixel loops must be kept inside these
* functions for performance reasons. Some benchmarking was done in [1] where having the innermost
@@ -215,6 +305,96 @@ static struct pixel_argb_u16 argb_u16_from_RGB565(const __le16 *pixel)
* [1]: https://lore.kernel.org/dri-devel/d258c8dc-78e9-4509-9037-a98f7f33b3a3@riseup.net/
*/
+static void Rx_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ int bits_per_pixel = drm_format_info_bpp(plane->frame_info->fb->format, 0);
+ u8 *src_pixels;
+ int rem_x, rem_y;
+
+ WARN_ONCE(drm_format_info_block_height(plane->frame_info->fb->format, 0) != 1,
+ "%s() only support formats with block_h == 1", __func__);
+
+ packed_pixels_addr(plane->frame_info, x_start, y_start, 0, &src_pixels, &rem_x, &rem_y);
+ int bit_offset = (8 - bits_per_pixel) - rem_x * bits_per_pixel;
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+ int mask = (0x1 << bits_per_pixel) - 1;
+ int lum_per_level = 0xFFFF / mask;
+
+ if (direction == READ_LEFT_TO_RIGHT || direction == READ_RIGHT_TO_LEFT) {
+ int restart_bit_offset;
+ int step_bit_offset;
+
+ if (direction == READ_LEFT_TO_RIGHT) {
+ restart_bit_offset = 8 - bits_per_pixel;
+ step_bit_offset = -bits_per_pixel;
+ } else {
+ restart_bit_offset = 0;
+ step_bit_offset = bits_per_pixel;
+ }
+
+ while (out_pixel < end) {
+ u8 val = ((*src_pixels) >> bit_offset) & mask;
+
+ *out_pixel = argb_u16_from_grayu16((int)val * lum_per_level);
+
+ bit_offset += step_bit_offset;
+ if (bit_offset < 0 || 8 <= bit_offset) {
+ bit_offset = restart_bit_offset;
+ src_pixels += step;
+ }
+ out_pixel += 1;
+ }
+ } else if (direction == READ_TOP_TO_BOTTOM || direction == READ_BOTTOM_TO_TOP) {
+ while (out_pixel < end) {
+ u8 val = (*src_pixels >> bit_offset) & mask;
+ *out_pixel = argb_u16_from_grayu16((int)val * lum_per_level);
+ src_pixels += step;
+ out_pixel += 1;
+ }
+ }
+}
+
+static void R1_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ Rx_read_line(plane, x_start, y_start, direction, count, out_pixel);
+}
+
+static void R2_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ Rx_read_line(plane, x_start, y_start, direction, count, out_pixel);
+}
+
+static void R4_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ Rx_read_line(plane, x_start, y_start, direction, count, out_pixel);
+}
+
+static void R8_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ struct pixel_argb_u16 *end = out_pixel + count;
+ u8 *src_pixels;
+ int step = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels);
+
+ while (out_pixel < end) {
+ *out_pixel = argb_u16_from_gray8(*src_pixels);
+ src_pixels += step;
+ out_pixel += 1;
+ }
+}
+
static void ARGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start,
enum pixel_read_direction direction, int count,
struct pixel_argb_u16 out_pixel[])
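
Note (illustrative, not part of this series): in Rx_read_line() above, lum_per_level = 0xFFFF / mask spreads the few gray levels of the low-bit-depth R* formats over the full 16-bit range. As a worked example with assumed values: for DRM_FORMAT_R2 the mask is 0x3, so lum_per_level = 65535 / 3 = 21845 and the four sample values 0..3 map to 0, 21845, 43690 and 65535; for DRM_FORMAT_R1 the two values map straight to 0 and 0xFFFF.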
@@ -332,6 +512,92 @@ static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start,
}
/*
+ * This callback can be used for YUV formats where U and V values are
+ * stored in the same plane (often called semi-planar formats). It will
+ * correctly handle subsampling as described in the drm_format_info of the plane.
+ *
+ * The conversion matrix stored in the @plane is used to:
+ * - Apply the correct color range and encoding
+ * - Convert YUV and YVU with the same function (a column swap is needed when setting up
+ * plane->conversion_matrix)
+ */
+static void semi_planar_yuv_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ u8 *y_plane;
+ u8 *uv_plane;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0,
+ &y_plane);
+ packed_pixels_addr_1x1(plane->frame_info,
+ x_start / plane->frame_info->fb->format->hsub,
+ y_start / plane->frame_info->fb->format->vsub, 1,
+ &uv_plane);
+ int step_y = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+ int step_uv = get_block_step_bytes(plane->frame_info->fb, direction, 1);
+ int subsampling = get_subsampling(plane->frame_info->fb->format, direction);
+ int subsampling_offset = get_subsampling_offset(direction, x_start, y_start);
+ const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix;
+
+ for (int i = 0; i < count; i++) {
+ *out_pixel = argb_u16_from_yuv888(y_plane[0], uv_plane[0], uv_plane[1],
+ conversion_matrix);
+ out_pixel += 1;
+ y_plane += step_y;
+ if ((i + subsampling_offset + 1) % subsampling == 0)
+ uv_plane += step_uv;
+ }
+}
+
+/*
+ * This callback can be used for YUV formats where each color component is
+ * stored in a different plane (often called planar formats). It will
+ * correctly handle subsampling as described in the drm_format_info of the plane.
+ *
+ * The conversion matrix stored in the @plane is used to:
+ * - Apply the correct color range and encoding
+ * - Convert YUV and YVU with the same function (a column swap is needed when setting up
+ * plane->conversion_matrix)
+ */
+static void planar_yuv_read_line(const struct vkms_plane_state *plane, int x_start,
+ int y_start, enum pixel_read_direction direction, int count,
+ struct pixel_argb_u16 out_pixel[])
+{
+ u8 *y_plane;
+ u8 *channel_1_plane;
+ u8 *channel_2_plane;
+
+ packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0,
+ &y_plane);
+ packed_pixels_addr_1x1(plane->frame_info,
+ x_start / plane->frame_info->fb->format->hsub,
+ y_start / plane->frame_info->fb->format->vsub, 1,
+ &channel_1_plane);
+ packed_pixels_addr_1x1(plane->frame_info,
+ x_start / plane->frame_info->fb->format->hsub,
+ y_start / plane->frame_info->fb->format->vsub, 2,
+ &channel_2_plane);
+ int step_y = get_block_step_bytes(plane->frame_info->fb, direction, 0);
+ int step_channel_1 = get_block_step_bytes(plane->frame_info->fb, direction, 1);
+ int step_channel_2 = get_block_step_bytes(plane->frame_info->fb, direction, 2);
+ int subsampling = get_subsampling(plane->frame_info->fb->format, direction);
+ int subsampling_offset = get_subsampling_offset(direction, x_start, y_start);
+ const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix;
+
+ for (int i = 0; i < count; i++) {
+ *out_pixel = argb_u16_from_yuv888(*y_plane, *channel_1_plane, *channel_2_plane,
+ conversion_matrix);
+ out_pixel += 1;
+ y_plane += step_y;
+ if ((i + subsampling_offset + 1) % subsampling == 0) {
+ channel_1_plane += step_channel_1;
+ channel_2_plane += step_channel_2;
+ }
+ }
+}
+
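
Note (illustrative, not part of this series): the (i + subsampling_offset + 1) % subsampling test in the two YUV read_line helpers above keeps the chroma siting stable regardless of where the line starts. As a worked example with assumed values, take NV12 (hsub == 2) read left to right from x_start = 3: get_subsampling_offset() returns 3, so the chroma pointer advances after i = 0 (the pixel at x = 3, the last column covered by the chroma sample for columns 2-3) and then after every second pixel, exactly as if the line had started at x = 0.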
+/*
* The following functions take one &struct pixel_argb_u16 and convert it to a specific format.
* The result is stored in @out_pixel.
*
@@ -456,6 +722,28 @@ pixel_read_line_t get_pixel_read_line_function(u32 format)
return &XRGB16161616_read_line;
case DRM_FORMAT_RGB565:
return &RGB565_read_line;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV24:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV42:
+ return &semi_planar_yuv_read_line;
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YVU444:
+ return &planar_yuv_read_line;
+ case DRM_FORMAT_R1:
+ return &R1_read_line;
+ case DRM_FORMAT_R2:
+ return &R2_read_line;
+ case DRM_FORMAT_R4:
+ return &R4_read_line;
+ case DRM_FORMAT_R8:
+ return &R8_read_line;
default:
/*
* This is a bug in vkms_plane_atomic_check(). All the supported
@@ -469,6 +757,183 @@ pixel_read_line_t get_pixel_read_line_function(u32 format)
}
}
+/*
+ * These matrices were generated using the colour python framework
+ *
+ * Below are the function calls used to generate each matrix, go to
+ * https://colour.readthedocs.io/en/develop/generated/colour.matrix_YCbCr.html
+ * for more info:
+ *
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * is_legal = False,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix no_operation = {
+ .matrix = {
+ { 4294967296, 0, 0, },
+ { 0, 4294967296, 0, },
+ { 0, 0, 4294967296, },
+ },
+ .y_offset = 0,
+};
+
+static const struct conversion_matrix yuv_bt601_full = {
+ .matrix = {
+ { 4294967296, 0, 6021544149 },
+ { 4294967296, -1478054095, -3067191994 },
+ { 4294967296, 7610682049, 0 },
+ },
+ .y_offset = 0,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.601"],
+ * is_legal = True,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt601_limited = {
+ .matrix = {
+ { 5020601039, 0, 6881764740 },
+ { 5020601039, -1689204679, -3505362278 },
+ { 5020601039, 8697922339, 0 },
+ },
+ .y_offset = 16,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * is_legal = False,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt709_full = {
+ .matrix = {
+ { 4294967296, 0, 6763714498 },
+ { 4294967296, -804551626, -2010578443 },
+ { 4294967296, 7969741314, 0 },
+ },
+ .y_offset = 0,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.709"],
+ * is_legal = True,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt709_limited = {
+ .matrix = {
+ { 5020601039, 0, 7729959424 },
+ { 5020601039, -919487572, -2297803934 },
+ { 5020601039, 9108275786, 0 },
+ },
+ .y_offset = 16,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * is_legal = False,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt2020_full = {
+ .matrix = {
+ { 4294967296, 0, 6333358775 },
+ { 4294967296, -706750298, -2453942994 },
+ { 4294967296, 8080551471, 0 },
+ },
+ .y_offset = 0,
+};
+
+/*
+ * numpy.around(colour.matrix_YCbCr(K=colour.WEIGHTS_YCBCR["ITU-R BT.2020"],
+ * is_legal = True,
+ * bits = 8) * 2**32).astype(int)
+ */
+static const struct conversion_matrix yuv_bt2020_limited = {
+ .matrix = {
+ { 5020601039, 0, 7238124312 },
+ { 5020601039, -807714626, -2804506279 },
+ { 5020601039, 9234915964, 0 },
+ },
+ .y_offset = 16,
+};
+
+/**
+ * swap_uv_columns() - Swap the U and V columns of a given matrix
+ *
+ * @matrix: Matrix in which the columns are swapped
+ */
+static void swap_uv_columns(struct conversion_matrix *matrix)
+{
+ swap(matrix->matrix[0][2], matrix->matrix[0][1]);
+ swap(matrix->matrix[1][2], matrix->matrix[1][1]);
+ swap(matrix->matrix[2][2], matrix->matrix[2][1]);
+}
+
+/**
+ * get_conversion_matrix_to_argb_u16() - Retrieve the correct YUV to RGB conversion matrix for a
+ * given encoding and range.
+ *
+ * @format: DRM_FORMAT_* value for which to obtain a conversion matrix (see [drm_fourcc.h])
+ * @encoding: DRM_COLOR_* value for which to obtain a conversion matrix
+ * @range: DRM_COLOR_*_RANGE value for which to obtain a conversion matrix
+ * @matrix: Pointer to store the value into
+ */
+void get_conversion_matrix_to_argb_u16(u32 format,
+ enum drm_color_encoding encoding,
+ enum drm_color_range range,
+ struct conversion_matrix *matrix)
+{
+ const struct conversion_matrix *matrix_to_copy;
+ bool limited_range;
+
+ switch (range) {
+ case DRM_COLOR_YCBCR_LIMITED_RANGE:
+ limited_range = true;
+ break;
+ case DRM_COLOR_YCBCR_FULL_RANGE:
+ limited_range = false;
+ break;
+ case DRM_COLOR_RANGE_MAX:
+ limited_range = false;
+ WARN_ONCE(true, "The requested range is not supported.");
+ break;
+ }
+
+ switch (encoding) {
+ case DRM_COLOR_YCBCR_BT601:
+ matrix_to_copy = limited_range ? &yuv_bt601_limited :
+ &yuv_bt601_full;
+ break;
+ case DRM_COLOR_YCBCR_BT709:
+ matrix_to_copy = limited_range ? &yuv_bt709_limited :
+ &yuv_bt709_full;
+ break;
+ case DRM_COLOR_YCBCR_BT2020:
+ matrix_to_copy = limited_range ? &yuv_bt2020_limited :
+ &yuv_bt2020_full;
+ break;
+ case DRM_COLOR_ENCODING_MAX:
+ matrix_to_copy = &no_operation;
+ WARN_ONCE(true, "The requested encoding is not supported.");
+ break;
+ }
+
+ memcpy(matrix, matrix_to_copy, sizeof(*matrix_to_copy));
+
+ switch (format) {
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YVU444:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_NV42:
+ swap_uv_columns(matrix);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(get_conversion_matrix_to_argb_u16);
+
/**
* get_pixel_write_function() - Retrieve the correct write_pixel function for a specific format.
* The returned pointer is NULL for unsupported pixel formats. The caller must ensure that the
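
Note (illustrative, not part of this series): a worked pass through the fixed-point pipeline above is a useful sanity check on the matrices. Take limited-range BT.601 white (Y = 0xeb, Cb = Cr = 0x80) with yuv_bt601_limited, whose first coefficient 5020601039 is roughly 1.16895 once divided by 2^32 (the decimal values here are illustrative approximations): the luma term is (235 - 16) * 257 = 56283 before the fixed-point promotion, both chroma terms vanish because (128 - 128) * 257 = 0, and r = g = b ~= 1.16895 * 56283 ~= 65792, which clamp() brings back to 0xffff. That small overshoot is exactly the kind of error the KUnit test's tolerance is there to absorb.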
diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h
index 8d2bef95ff79..b4fe62ab9c65 100644
--- a/drivers/gpu/drm/vkms/vkms_formats.h
+++ b/drivers/gpu/drm/vkms/vkms_formats.h
@@ -9,4 +9,13 @@ pixel_read_line_t get_pixel_read_line_function(u32 format);
pixel_write_t get_pixel_write_function(u32 format);
+void get_conversion_matrix_to_argb_u16(u32 format, enum drm_color_encoding encoding,
+ enum drm_color_range range,
+ struct conversion_matrix *matrix);
+
+#if IS_ENABLED(CONFIG_KUNIT)
+struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2,
+ const struct conversion_matrix *matrix);
+#endif
+
#endif /* _VKMS_FORMATS_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index e34f8c7f83c3..e3fdd161d0f0 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -18,7 +18,23 @@ static const u32 vkms_formats[] = {
DRM_FORMAT_ABGR8888,
DRM_FORMAT_XRGB16161616,
DRM_FORMAT_ARGB16161616,
- DRM_FORMAT_RGB565
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV24,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_NV42,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YUV444,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YVU422,
+ DRM_FORMAT_YVU444,
+ DRM_FORMAT_R1,
+ DRM_FORMAT_R2,
+ DRM_FORMAT_R4,
+ DRM_FORMAT_R8,
};
static struct drm_plane_state *
@@ -119,6 +135,8 @@ static void vkms_plane_atomic_update(struct drm_plane *plane,
frame_info->rotation = new_state->rotation;
vkms_plane_state->pixel_read_line = get_pixel_read_line_function(fmt);
+ get_conversion_matrix_to_argb_u16(fmt, new_state->color_encoding, new_state->color_range,
+ &vkms_plane_state->conversion_matrix);
}
static int vkms_plane_atomic_check(struct drm_plane *plane,
@@ -205,5 +223,14 @@ struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
drm_plane_create_rotation_property(&plane->base, DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_MASK | DRM_MODE_REFLECT_MASK);
+ drm_plane_create_color_properties(&plane->base,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709) |
+ BIT(DRM_COLOR_YCBCR_BT2020),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT601,
+ DRM_COLOR_YCBCR_FULL_RANGE);
+
return plane;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index dd4ca6a9c690..8fe02131a6c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -544,7 +544,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno;
vmw_cmd_commit_flush(dev_priv, bytes);
- vmw_update_seqno(dev_priv);
+ vmw_fences_update(dev_priv->fman);
out_err:
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 0695a342b1ef..bc0342c58b4b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -440,8 +440,10 @@ static int vmw_device_init(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
}
- dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+ u32 seqno = vmw_fence_read(dev_priv);
+
+ atomic_set(&dev_priv->last_read_seqno, seqno);
+ atomic_set(&dev_priv->marker_seq, seqno);
return 0;
}
@@ -454,7 +456,7 @@ static void vmw_device_fini(struct vmw_private *vmw)
while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
;
- vmw->last_read_seqno = vmw_fence_read(vmw);
+ atomic_set(&vmw->last_read_seqno, vmw_fence_read(vmw));
vmw_write(vmw, SVGA_REG_CONFIG_DONE,
vmw->config_done_state);
@@ -713,7 +715,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
pci_set_master(pdev);
- ret = pci_request_regions(pdev, "vmwgfx probe");
+ ret = pcim_request_all_regions(pdev, "vmwgfx probe");
if (ret)
return ret;
@@ -733,7 +735,6 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
if (!dev->rmmio) {
drm_err(&dev->drm,
"Failed mapping registers mmio memory.\n");
- pci_release_regions(pdev);
return -ENOMEM;
}
} else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
@@ -754,11 +755,9 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
if (IS_ERR(dev->fifo_mem)) {
drm_err(&dev->drm,
"Failed mapping FIFO memory.\n");
- pci_release_regions(pdev);
return PTR_ERR(dev->fifo_mem);
}
} else {
- pci_release_regions(pdev);
return -EINVAL;
}
@@ -836,7 +835,6 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
int ret;
enum vmw_res_type i;
bool refuse_dma = false;
- struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
vmw_sw_context_init(dev_priv);
@@ -852,7 +850,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
return ret;
ret = vmw_detect_version(dev_priv);
if (ret)
- goto out_no_pci_or_version;
+ return ret;
for (i = vmw_res_context; i < vmw_res_max; ++i) {
@@ -1152,15 +1150,13 @@ out_err0:
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
-out_no_pci_or_version:
- pci_release_regions(pdev);
+
return ret;
}
static void vmw_driver_unload(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
enum vmw_res_type i;
unregister_pm_notifier(&dev_priv->pm_nb);
@@ -1196,8 +1192,6 @@ static void vmw_driver_unload(struct drm_device *dev)
idr_destroy(&dev_priv->res_idr[i]);
vmw_mksstat_remove_all(dev_priv);
-
- pci_release_regions(pdev);
}
static void vmw_postclose(struct drm_device *dev,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 594af8eb04c6..eda5b6f8f4c4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -522,7 +522,7 @@ struct vmw_private {
int cmdbuf_waiters; /* Protected by waiter_lock */
int error_waiters; /* Protected by waiter_lock */
int fifo_queue_waiters; /* Protected by waiter_lock */
- uint32_t last_read_seqno;
+ atomic_t last_read_seqno;
struct vmw_fence_manager *fman;
uint32_t irq_mask; /* Updates protected by waiter_lock */
@@ -1006,15 +1006,14 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
uint32_t seqno,
bool interruptible,
unsigned long timeout);
-extern void vmw_update_seqno(struct vmw_private *dev_priv);
-extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
-extern void vmw_goal_waiter_remove(struct vmw_private *dev_priv);
-extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
- int *waiter_count);
-extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
- u32 flag, int *waiter_count);
+bool vmw_seqno_waiter_add(struct vmw_private *dev_priv);
+bool vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
+bool vmw_goal_waiter_add(struct vmw_private *dev_priv);
+bool vmw_goal_waiter_remove(struct vmw_private *dev_priv);
+bool vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag,
+ int *waiter_count);
+bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+ u32 flag, int *waiter_count);
/**
* Kernel modesetting - vmwgfx_kms.c
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index e831e324e737..819704ac675d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3878,8 +3878,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
fence_rep.handle = fence_handle;
fence_rep.seqno = fence->base.seqno;
- vmw_update_seqno(dev_priv);
- fence_rep.passed_seqno = dev_priv->last_read_seqno;
+ fence_rep.passed_seqno = vmw_fences_update(dev_priv->fman);
}
/*
@@ -4068,23 +4067,6 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
return 0;
}
-/*
- * DMA fence callback to remove a seqno_waiter
- */
-struct seqno_waiter_rm_context {
- struct dma_fence_cb base;
- struct vmw_private *dev_priv;
-};
-
-static void seqno_waiter_rm_cb(struct dma_fence *f, struct dma_fence_cb *cb)
-{
- struct seqno_waiter_rm_context *ctx =
- container_of(cb, struct seqno_waiter_rm_context, base);
-
- vmw_seqno_waiter_remove(ctx->dev_priv);
- kfree(ctx);
-}
-
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
void __user *user_commands, void *kernel_commands,
@@ -4265,15 +4247,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} else {
/* Link the fence with the FD created earlier */
fd_install(out_fence_fd, sync_file->file);
- struct seqno_waiter_rm_context *ctx =
- kmalloc(sizeof(*ctx), GFP_KERNEL);
- ctx->dev_priv = dev_priv;
- vmw_seqno_waiter_add(dev_priv);
- if (dma_fence_add_callback(&fence->base, &ctx->base,
- seqno_waiter_rm_cb) < 0) {
- vmw_seqno_waiter_remove(dev_priv);
- kfree(ctx);
- }
}
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 588d50ababf6..c2294abbe753 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,32 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
+ * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
*
**************************************************************************/
-#include <linux/sched/signal.h>
-
#include "vmwgfx_drv.h"
#define VMW_FENCE_WRAP (1 << 31)
@@ -35,14 +14,7 @@ struct vmw_fence_manager {
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
- struct work_struct work;
bool fifo_down;
- struct list_head cleanup_list;
- uint32_t pending_actions[VMW_ACTION_MAX];
- struct mutex goal_irq_mutex;
- bool goal_irq_on; /* Protected by @goal_irq_mutex */
- bool seqno_valid; /* Protected by @lock, and may not be set to true
- without the @goal_irq_mutex held. */
u64 ctx;
};
@@ -52,12 +24,10 @@ struct vmw_user_fence {
};
/**
- * struct vmw_event_fence_action - fence action that delivers a drm event.
+ * struct vmw_event_fence_action - fence callback that delivers a DRM event.
*
- * @action: A struct vmw_fence_action to hook up to a fence.
+ * @base: For use with dma_fence_add_callback(...)
* @event: A pointer to the pending event.
- * @fence: A referenced pointer to the fence to keep it alive while @action
- * hangs on it.
* @dev: Pointer to a struct drm_device so we can access the event stuff.
* @tv_sec: If non-null, the variable pointed to will be assigned
* current time tv_sec val when the fence signals.
@@ -65,10 +35,9 @@ struct vmw_user_fence {
* be assigned the current time tv_usec val when the fence signals.
*/
struct vmw_event_fence_action {
- struct vmw_fence_action action;
+ struct dma_fence_cb base;
struct drm_pending_event *event;
- struct vmw_fence_obj *fence;
struct drm_device *dev;
uint32_t *tv_sec;
@@ -81,44 +50,6 @@ fman_from_fence(struct vmw_fence_obj *fence)
return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}
-static u32 vmw_fence_goal_read(struct vmw_private *vmw)
-{
- if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
- return vmw_read(vmw, SVGA_REG_FENCE_GOAL);
- else
- return vmw_fifo_mem_read(vmw, SVGA_FIFO_FENCE_GOAL);
-}
-
-static void vmw_fence_goal_write(struct vmw_private *vmw, u32 value)
-{
- if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
- vmw_write(vmw, SVGA_REG_FENCE_GOAL, value);
- else
- vmw_fifo_mem_write(vmw, SVGA_FIFO_FENCE_GOAL, value);
-}
-
-/*
- * Note on fencing subsystem usage of irqs:
- * Typically the vmw_fences_update function is called
- *
- * a) When a new fence seqno has been submitted by the fifo code.
- * b) On-demand when we have waiters. Sleeping waiters will switch on the
- * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
- * irq is received. When the last fence waiter is gone, that IRQ is masked
- * away.
- *
- * In situations where there are no waiters and we don't submit any new fences,
- * fence objects may not be signaled. This is perfectly OK, since there are
- * no consumers of the signaled data, but that is NOT ok when there are fence
- * actions attached to a fence. The fencing subsystem then makes use of the
- * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
- * which has an action attached, and each time vmw_fences_update is called,
- * the subsystem makes sure the fence goal seqno is updated.
- *
- * The fence goal seqno irq is on as long as there are unsignaled fence
- * objects with actions attached to them.
- */
-
static void vmw_fence_obj_destroy(struct dma_fence *f)
{
struct vmw_fence_obj *fence =
@@ -126,8 +57,21 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
struct vmw_fence_manager *fman = fman_from_fence(fence);
if (!list_empty(&fence->head)) {
+ /* The fence manager still has an implicit reference to this
+ * fence via the fence list if head is set. Because the lock is
+ * required to be held when the fence manager updates the fence
+ * list, either the fence will have been removed after we get
+ * the lock below or we can safely remove it and the fence
+ * manager will never see it. This implies the fence is being
+ * deleted without being signaled, which is dubious but valid
+ * if there are no callbacks. The dma_fence code that calls
+ * this hook already warns about unsignaled fences deleted with
+ * callbacks attached, so there is no need to warn again here.
+ */
spin_lock(&fman->lock);
list_del_init(&fence->head);
+ if (fence->waiter_added)
+ vmw_seqno_waiter_remove(fman->dev_priv);
spin_unlock(&fman->lock);
}
fence->destroy(fence);
@@ -143,165 +87,46 @@ static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
return "svga";
}
+/* When we toggle signaling for the SVGA device, there is a race window from
+ * the time we first read the fence seqno to the time we enable interrupts.
+ * If we miss the interrupt for a fence during this window, it's likely the
+ * driver will stall. As a result, we need to re-read the seqno after
+ * interrupts are enabled. If interrupts were already enabled, we just
+ * increment the number of seqno waiters.
+ */
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
+ u32 seqno;
struct vmw_fence_obj *fence =
container_of(f, struct vmw_fence_obj, base);
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;
-
- u32 seqno = vmw_fence_read(dev_priv);
- if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
+check_for_race:
+ seqno = vmw_fence_read(dev_priv);
+ if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
+ if (fence->waiter_added) {
+ vmw_seqno_waiter_remove(dev_priv);
+ fence->waiter_added = false;
+ }
return false;
-
+ } else if (!fence->waiter_added) {
+ fence->waiter_added = true;
+ if (vmw_seqno_waiter_add(dev_priv))
+ goto check_for_race;
+ }
return true;
}
-struct vmwgfx_wait_cb {
- struct dma_fence_cb base;
- struct task_struct *task;
-};
-
-static void
-vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
-{
- struct vmwgfx_wait_cb *wait =
- container_of(cb, struct vmwgfx_wait_cb, base);
-
- wake_up_process(wait->task);
-}
-
-static void __vmw_fences_update(struct vmw_fence_manager *fman);
-
-static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
-{
- struct vmw_fence_obj *fence =
- container_of(f, struct vmw_fence_obj, base);
-
- struct vmw_fence_manager *fman = fman_from_fence(fence);
- struct vmw_private *dev_priv = fman->dev_priv;
- struct vmwgfx_wait_cb cb;
- long ret = timeout;
-
- if (likely(vmw_fence_obj_signaled(fence)))
- return timeout;
-
- vmw_seqno_waiter_add(dev_priv);
-
- spin_lock(f->lock);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
- goto out;
-
- if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
- goto out;
- }
-
- cb.base.func = vmwgfx_wait_cb;
- cb.task = current;
- list_add(&cb.base.node, &f->cb_list);
-
- for (;;) {
- __vmw_fences_update(fman);
-
- /*
- * We can use the barrier free __set_current_state() since
- * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the
- * fence spinlock.
- */
- if (intr)
- __set_current_state(TASK_INTERRUPTIBLE);
- else
- __set_current_state(TASK_UNINTERRUPTIBLE);
-
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) {
- if (ret == 0 && timeout > 0)
- ret = 1;
- break;
- }
-
- if (intr && signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
-
- if (ret == 0)
- break;
-
- spin_unlock(f->lock);
-
- ret = schedule_timeout(ret);
-
- spin_lock(f->lock);
- }
- __set_current_state(TASK_RUNNING);
- if (!list_empty(&cb.base.node))
- list_del(&cb.base.node);
-
-out:
- spin_unlock(f->lock);
-
- vmw_seqno_waiter_remove(dev_priv);
-
- return ret;
-}
+static u32 __vmw_fences_update(struct vmw_fence_manager *fman);
static const struct dma_fence_ops vmw_fence_ops = {
.get_driver_name = vmw_fence_get_driver_name,
.get_timeline_name = vmw_fence_get_timeline_name,
.enable_signaling = vmw_fence_enable_signaling,
- .wait = vmw_fence_wait,
.release = vmw_fence_obj_destroy,
};
-/*
- * Execute signal actions on fences recently signaled.
- * This is done from a workqueue so we don't have to execute
- * signal actions from atomic context.
- */
-
-static void vmw_fence_work_func(struct work_struct *work)
-{
- struct vmw_fence_manager *fman =
- container_of(work, struct vmw_fence_manager, work);
- struct list_head list;
- struct vmw_fence_action *action, *next_action;
- bool seqno_valid;
-
- do {
- INIT_LIST_HEAD(&list);
- mutex_lock(&fman->goal_irq_mutex);
-
- spin_lock(&fman->lock);
- list_splice_init(&fman->cleanup_list, &list);
- seqno_valid = fman->seqno_valid;
- spin_unlock(&fman->lock);
-
- if (!seqno_valid && fman->goal_irq_on) {
- fman->goal_irq_on = false;
- vmw_goal_waiter_remove(fman->dev_priv);
- }
- mutex_unlock(&fman->goal_irq_mutex);
-
- if (list_empty(&list))
- return;
-
- /*
- * At this point, only we should be able to manipulate the
- * list heads of the actions we have on the private list.
- * hence fman::lock not held.
- */
-
- list_for_each_entry_safe(action, next_action, &list, head) {
- list_del_init(&action->head);
- if (action->cleanup)
- action->cleanup(action);
- }
- } while (1);
-}
-
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);
@@ -312,10 +137,7 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
fman->dev_priv = dev_priv;
spin_lock_init(&fman->lock);
INIT_LIST_HEAD(&fman->fence_list);
- INIT_LIST_HEAD(&fman->cleanup_list);
- INIT_WORK(&fman->work, &vmw_fence_work_func);
fman->fifo_down = true;
- mutex_init(&fman->goal_irq_mutex);
fman->ctx = dma_fence_context_alloc(1);
return fman;
@@ -325,11 +147,8 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
bool lists_empty;
- (void) cancel_work_sync(&fman->work);
-
spin_lock(&fman->lock);
- lists_empty = list_empty(&fman->fence_list) &&
- list_empty(&fman->cleanup_list);
+ lists_empty = list_empty(&fman->fence_list);
spin_unlock(&fman->lock);
BUG_ON(!lists_empty);
@@ -344,7 +163,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
fman->ctx, seqno);
- INIT_LIST_HEAD(&fence->seq_passed_actions);
fence->destroy = destroy;
spin_lock(&fman->lock);
@@ -352,6 +170,11 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
ret = -EBUSY;
goto out_unlock;
}
+ /* This creates an implicit reference to the fence from the fence
+ * manager. It will be dropped when the fence is signaled which is
+	 * expected to happen before deletion. The destructor has code to
+	 * catch the rare case of deletion before signaling.
+ */
list_add_tail(&fence->head, &fman->fence_list);
out_unlock:
@@ -360,148 +183,35 @@ out_unlock:
}
-static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
- struct list_head *list)
-{
- struct vmw_fence_action *action, *next_action;
-
- list_for_each_entry_safe(action, next_action, list, head) {
- list_del_init(&action->head);
- fman->pending_actions[action->type]--;
- if (action->seq_passed != NULL)
- action->seq_passed(action);
-
- /*
- * Add the cleanup action to the cleanup list so that
- * it will be performed by a worker task.
- */
-
- list_add_tail(&action->head, &fman->cleanup_list);
- }
-}
-
-/**
- * vmw_fence_goal_new_locked - Figure out a new device fence goal
- * seqno if needed.
- *
- * @fman: Pointer to a fence manager.
- * @passed_seqno: The seqno the device currently signals as passed.
- *
- * This function should be called with the fence manager lock held.
- * It is typically called when we have a new passed_seqno, and
- * we might need to update the fence goal. It checks to see whether
- * the current fence goal has already passed, and, in that case,
- * scans through all unsignaled fences to get the next fence object with an
- * action attached, and sets the seqno of that fence as a new fence goal.
- *
- * returns true if the device goal seqno was updated. False otherwise.
- */
-static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
- u32 passed_seqno)
-{
- u32 goal_seqno;
- struct vmw_fence_obj *fence, *next_fence;
-
- if (likely(!fman->seqno_valid))
- return false;
-
- goal_seqno = vmw_fence_goal_read(fman->dev_priv);
- if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
- return false;
-
- fman->seqno_valid = false;
- list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
- if (!list_empty(&fence->seq_passed_actions)) {
- fman->seqno_valid = true;
- vmw_fence_goal_write(fman->dev_priv,
- fence->base.seqno);
- break;
- }
- }
-
- return true;
-}
-
-
-/**
- * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
- * needed.
- *
- * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
- * considered as a device fence goal.
- *
- * This function should be called with the fence manager lock held.
- * It is typically called when an action has been attached to a fence to
- * check whether the seqno of that fence should be used for a fence
- * goal interrupt. This is typically needed if the current fence goal is
- * invalid, or has a higher seqno than that of the current fence object.
- *
- * returns true if the device goal seqno was updated. False otherwise.
- */
-static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
-{
- struct vmw_fence_manager *fman = fman_from_fence(fence);
- u32 goal_seqno;
-
- if (dma_fence_is_signaled_locked(&fence->base))
- return false;
-
- goal_seqno = vmw_fence_goal_read(fman->dev_priv);
- if (likely(fman->seqno_valid &&
- goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
- return false;
-
- vmw_fence_goal_write(fman->dev_priv, fence->base.seqno);
- fman->seqno_valid = true;
-
- return true;
-}
-
-static void __vmw_fences_update(struct vmw_fence_manager *fman)
+static u32 __vmw_fences_update(struct vmw_fence_manager *fman)
{
struct vmw_fence_obj *fence, *next_fence;
- struct list_head action_list;
- bool needs_rerun;
- uint32_t seqno, new_seqno;
+ const bool cookie = dma_fence_begin_signalling();
+ const u32 seqno = vmw_fence_read(fman->dev_priv);
- seqno = vmw_fence_read(fman->dev_priv);
-rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
list_del_init(&fence->head);
+ if (fence->waiter_added) {
+ vmw_seqno_waiter_remove(fman->dev_priv);
+ fence->waiter_added = false;
+ }
dma_fence_signal_locked(&fence->base);
- INIT_LIST_HEAD(&action_list);
- list_splice_init(&fence->seq_passed_actions,
- &action_list);
- vmw_fences_perform_actions(fman, &action_list);
} else
break;
}
-
- /*
- * Rerun if the fence goal seqno was updated, and the
- * hardware might have raced with that update, so that
- * we missed a fence_goal irq.
- */
-
- needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
- if (unlikely(needs_rerun)) {
- new_seqno = vmw_fence_read(fman->dev_priv);
- if (new_seqno != seqno) {
- seqno = new_seqno;
- goto rerun;
- }
- }
-
- if (!list_empty(&fman->cleanup_list))
- (void) schedule_work(&fman->work);
+ dma_fence_end_signalling(cookie);
+ atomic_set_release(&fman->dev_priv->last_read_seqno, seqno);
+ return seqno;
}
-void vmw_fences_update(struct vmw_fence_manager *fman)
+u32 vmw_fences_update(struct vmw_fence_manager *fman)
{
+ u32 seqno;
spin_lock(&fman->lock);
- __vmw_fences_update(fman);
+ seqno = __vmw_fences_update(fman);
spin_unlock(&fman->lock);
+ return seqno;
}
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
@@ -539,14 +249,13 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
struct vmw_fence_obj **p_fence)
{
struct vmw_fence_obj *fence;
- int ret;
+ int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(!fence))
return -ENOMEM;
- ret = vmw_fence_obj_init(fman, fence, seqno,
- vmw_fence_destroy);
+ ret = vmw_fence_obj_init(fman, fence, seqno, vmw_fence_destroy);
if (unlikely(ret != 0))
goto out_err_init;
@@ -638,7 +347,6 @@ out_no_object:
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
- struct list_head action_list;
int ret;
/*
@@ -661,10 +369,6 @@ void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
if (unlikely(ret != 0)) {
list_del_init(&fence->head);
dma_fence_signal(&fence->base);
- INIT_LIST_HEAD(&action_list);
- list_splice_init(&fence->seq_passed_actions,
- &action_list);
- vmw_fences_perform_actions(fman, &action_list);
}
BUG_ON(!list_empty(&fence->head));
@@ -778,7 +482,6 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_signaled_arg *) data;
struct ttm_base_object *base;
struct vmw_fence_obj *fence;
- struct vmw_fence_manager *fman;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
@@ -787,14 +490,11 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
- fman = fman_from_fence(fence);
arg->signaled = vmw_fence_obj_signaled(fence);
arg->signaled_flags = arg->flags;
- spin_lock(&fman->lock);
- arg->passed_seqno = dev_priv->last_read_seqno;
- spin_unlock(&fman->lock);
+ arg->passed_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);
ttm_base_object_unref(&base);
@@ -822,10 +522,11 @@ int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
* attached has passed. It queues the event on the submitter's event list.
* This function is always called from atomic context.
*/
-static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
+static void vmw_event_fence_action_seq_passed(struct dma_fence *f,
+ struct dma_fence_cb *cb)
{
struct vmw_event_fence_action *eaction =
- container_of(action, struct vmw_event_fence_action, action);
+ container_of(cb, struct vmw_event_fence_action, base);
struct drm_device *dev = eaction->dev;
struct drm_pending_event *event = eaction->event;
@@ -837,7 +538,7 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
if (likely(eaction->tv_sec != NULL)) {
struct timespec64 ts;
- ktime_get_ts64(&ts);
+			ts = ktime_to_timespec64(f->timestamp);
/* monotonic time, so no y2038 overflow */
*eaction->tv_sec = ts.tv_sec;
*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
@@ -846,75 +547,10 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
drm_send_event_locked(dev, eaction->event);
eaction->event = NULL;
spin_unlock_irq(&dev->event_lock);
-}
-
-/**
- * vmw_event_fence_action_cleanup
- *
- * @action: The struct vmw_fence_action embedded in a struct
- * vmw_event_fence_action.
- *
- * This function is the struct vmw_fence_action destructor. It's typically
- * called from a workqueue.
- */
-static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
-{
- struct vmw_event_fence_action *eaction =
- container_of(action, struct vmw_event_fence_action, action);
-
- vmw_fence_obj_unreference(&eaction->fence);
+ dma_fence_put(f);
kfree(eaction);
}
-
-/**
- * vmw_fence_obj_add_action - Add an action to a fence object.
- *
- * @fence: The fence object.
- * @action: The action to add.
- *
- * Note that the action callbacks may be executed before this function
- * returns.
- */
-static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
- struct vmw_fence_action *action)
-{
- struct vmw_fence_manager *fman = fman_from_fence(fence);
- bool run_update = false;
-
- mutex_lock(&fman->goal_irq_mutex);
- spin_lock(&fman->lock);
-
- fman->pending_actions[action->type]++;
- if (dma_fence_is_signaled_locked(&fence->base)) {
- struct list_head action_list;
-
- INIT_LIST_HEAD(&action_list);
- list_add_tail(&action->head, &action_list);
- vmw_fences_perform_actions(fman, &action_list);
- } else {
- list_add_tail(&action->head, &fence->seq_passed_actions);
-
- /*
- * This function may set fman::seqno_valid, so it must
- * be run with the goal_irq_mutex held.
- */
- run_update = vmw_fence_goal_check_locked(fence);
- }
-
- spin_unlock(&fman->lock);
-
- if (run_update) {
- if (!fman->goal_irq_on) {
- fman->goal_irq_on = true;
- vmw_goal_waiter_add(fman->dev_priv);
- }
- vmw_fences_update(fman);
- }
- mutex_unlock(&fman->goal_irq_mutex);
-
-}
-
/**
* vmw_event_fence_action_queue - Post an event for sending when a fence
* object seqno has passed.
@@ -949,18 +585,14 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv,
return -ENOMEM;
eaction->event = event;
-
- eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
- eaction->action.cleanup = vmw_event_fence_action_cleanup;
- eaction->action.type = VMW_ACTION_EVENT;
-
- eaction->fence = vmw_fence_obj_reference(fence);
eaction->dev = &fman->dev_priv->drm;
eaction->tv_sec = tv_sec;
eaction->tv_usec = tv_usec;
- vmw_fence_obj_add_action(fence, &eaction->action);
-
+ vmw_fence_obj_reference(fence); // Dropped in CB
+ if (dma_fence_add_callback(&fence->base, &eaction->base,
+ vmw_event_fence_action_seq_passed) < 0)
+ vmw_event_fence_action_seq_passed(&fence->base, &eaction->base);
return 0;
}
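
For reference, a minimal sketch of the callback-based pattern that replaces the action list (illustrative only, not part of the patch; struct my_event, my_event_fence_cb() and my_event_queue() are hypothetical names, while the dma_fence_* calls are the real dma-buf API used by vmw_event_fence_action_queue() above):

#include <linux/dma-fence.h>
#include <linux/slab.h>

struct my_event {
	struct dma_fence_cb base;	/* embedded so container_of() works */
	void (*deliver)(struct my_event *ev);
};

static void my_event_fence_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	struct my_event *ev = container_of(cb, struct my_event, base);

	ev->deliver(ev);	/* may run from the signaler's (atomic) context */
	dma_fence_put(f);	/* drop the reference taken before arming */
	kfree(ev);
}

static int my_event_queue(struct dma_fence *f, struct my_event *ev)
{
	dma_fence_get(f);	/* dropped in the callback */
	/*
	 * dma_fence_add_callback() returns -ENOENT when the fence has
	 * already signaled; run the callback directly in that case,
	 * mirroring the fallback branch above.
	 */
	if (dma_fence_add_callback(f, &ev->base, my_event_fence_cb) < 0)
		my_event_fence_cb(f, &ev->base);
	return 0;
}
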
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index a7eee579c76a..e897cccae1ae 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -39,27 +39,10 @@ struct drm_pending_event;
struct vmw_private;
struct vmw_fence_manager;
-/**
- *
- *
- */
-enum vmw_action_type {
- VMW_ACTION_EVENT = 0,
- VMW_ACTION_MAX
-};
-
-struct vmw_fence_action {
- struct list_head head;
- enum vmw_action_type type;
- void (*seq_passed) (struct vmw_fence_action *action);
- void (*cleanup) (struct vmw_fence_action *action);
-};
-
struct vmw_fence_obj {
struct dma_fence base;
-
+ bool waiter_added;
struct list_head head;
- struct list_head seq_passed_actions;
void (*destroy)(struct vmw_fence_obj *fence);
};
@@ -86,7 +69,7 @@ vmw_fence_obj_reference(struct vmw_fence_obj *fence)
return fence;
}
-extern void vmw_fences_update(struct vmw_fence_manager *fman);
+u32 vmw_fences_update(struct vmw_fence_manager *fman);
extern bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 086e69a130d4..05773eb394d3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -123,26 +123,17 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
-void vmw_update_seqno(struct vmw_private *dev_priv)
-{
- uint32_t seqno = vmw_fence_read(dev_priv);
-
- if (dev_priv->last_read_seqno != seqno) {
- dev_priv->last_read_seqno = seqno;
- vmw_fences_update(dev_priv->fman);
- }
-}
-
bool vmw_seqno_passed(struct vmw_private *dev_priv,
uint32_t seqno)
{
bool ret;
+ u32 last_read_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ if (last_read_seqno - seqno < VMW_FENCE_WRAP)
return true;
- vmw_update_seqno(dev_priv);
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
+ last_read_seqno = vmw_fences_update(dev_priv->fman);
+ if (last_read_seqno - seqno < VMW_FENCE_WRAP)
return true;
if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
@@ -239,51 +230,59 @@ out_err:
return ret;
}
-void vmw_generic_waiter_add(struct vmw_private *dev_priv,
+bool vmw_generic_waiter_add(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
- spin_lock_bh(&dev_priv->waiter_lock);
+ bool hw_programmed = false;
+
+ spin_lock(&dev_priv->waiter_lock);
if ((*waiter_count)++ == 0) {
vmw_irq_status_write(dev_priv, flag);
dev_priv->irq_mask |= flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ hw_programmed = true;
}
- spin_unlock_bh(&dev_priv->waiter_lock);
+ spin_unlock(&dev_priv->waiter_lock);
+ return hw_programmed;
}
-void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
+bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
u32 flag, int *waiter_count)
{
- spin_lock_bh(&dev_priv->waiter_lock);
+ bool hw_programmed = false;
+
+ spin_lock(&dev_priv->waiter_lock);
if (--(*waiter_count) == 0) {
dev_priv->irq_mask &= ~flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
+ hw_programmed = true;
}
- spin_unlock_bh(&dev_priv->waiter_lock);
+ spin_unlock(&dev_priv->waiter_lock);
+ return hw_programmed;
}
-void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
+bool vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
- &dev_priv->fence_queue_waiters);
+ return vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+ &dev_priv->fence_queue_waiters);
}
-void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
+bool vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
- &dev_priv->fence_queue_waiters);
+ return vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
+ &dev_priv->fence_queue_waiters);
}
-void vmw_goal_waiter_add(struct vmw_private *dev_priv)
+bool vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
- &dev_priv->goal_queue_waiters);
+ return vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
+ &dev_priv->goal_queue_waiters);
}
-void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
+bool vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
- vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
- &dev_priv->goal_queue_waiters);
+ return vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
+ &dev_priv->goal_queue_waiters);
}
static void vmw_irq_preinstall(struct drm_device *dev)
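
The seqno tests in vmw_seqno_passed() and vmw_fence_enable_signaling() rely on unsigned wraparound arithmetic; a standalone sketch of the comparison, with a hypothetical seqno_passed() helper (VMW_FENCE_WRAP is assumed to be a window well below 2^31), is:

static inline bool seqno_passed(u32 current_seqno, u32 fence_seqno, u32 wrap)
{
	/*
	 * Unsigned subtraction is taken modulo 2^32, so e.g.
	 * current_seqno = 0x00000002 and fence_seqno = 0xfffffffe give 4,
	 * which is < wrap: the fence is treated as passed even though the
	 * hardware counter wrapped around in between.
	 */
	return current_seqno - fence_seqno < wrap;
}
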
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 7fb1c88bcc47..69dfe69ce0f8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -896,7 +896,7 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_device,
.size = size,
- .pin = true,
+ .pin = false,
.keep_resv = true,
};
diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig
index fcc2677a4229..30ed74ad29ab 100644
--- a/drivers/gpu/drm/xe/Kconfig
+++ b/drivers/gpu/drm/xe/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_XE
- tristate "Intel Xe Graphics"
+ tristate "Intel Xe2 Graphics"
depends on DRM && PCI && (m || (y && KUNIT=y))
depends on INTEL_VSEC || !INTEL_VSEC
depends on X86_PLATFORM_DEVICES || !(X86 && ACPI)
@@ -31,7 +31,6 @@ config DRM_XE
select ACPI_VIDEO if X86 && ACPI
select ACPI_WMI if X86 && ACPI
select SYNC_FILE
- select IOSF_MBI
select CRC32
select SND_HDA_I915 if SND_HDA_CORE
select CEC_CORE if CEC_NOTIFIER
@@ -46,7 +45,8 @@ config DRM_XE
select AUXILIARY_BUS
select HMM_MIRROR
help
- Experimental driver for Intel Xe series GPUs
+ Driver for Intel Xe2 series GPUs and later. Experimental support
+ for Xe series is also available.
If "M" is selected, the module will be called xe.
diff --git a/drivers/gpu/drm/xe/Kconfig.debug b/drivers/gpu/drm/xe/Kconfig.debug
index 0d749ed44878..01735c6ece8b 100644
--- a/drivers/gpu/drm/xe/Kconfig.debug
+++ b/drivers/gpu/drm/xe/Kconfig.debug
@@ -86,12 +86,17 @@ config DRM_XE_KUNIT_TEST
If in doubt, say "N".
-config DRM_XE_LARGE_GUC_BUFFER
- bool "Enable larger guc log buffer"
+config DRM_XE_DEBUG_GUC
+ bool "Enable extra GuC related debug options"
+ depends on DRM_XE_DEBUG
default n
+ select STACKDEPOT
help
Choose this option when debugging guc issues.
- Buffer should be large enough for complex issues.
+ The GuC log buffer is increased to the maximum allowed, which should
+ be large enough for complex issues. The tracking of FAST_REQ messages
+ is extended to include a record of the calling stack, which is then
+ dumped on a FAST_REQ error notification.
Recommended for driver developers only.
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index e4bf484d4121..f5f5775acdc0 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -139,7 +139,8 @@ xe-y += \
xe_guc_relay.o \
xe_memirq.o \
xe_sriov.o \
- xe_sriov_vf.o
+ xe_sriov_vf.o \
+ xe_tile_sriov_vf.o
xe-$(CONFIG_PCI_IOV) += \
xe_gt_sriov_pf.o \
diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
index 448afb86e05c..ff4f412c28d8 100644
--- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h
@@ -161,6 +161,37 @@ enum xe_guc_preempt_options {
XE_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
};
+enum xe_guc_register_context_param_offsets {
+ XE_GUC_REGISTER_CONTEXT_DATA_0_MBZ = 0,
+ XE_GUC_REGISTER_CONTEXT_DATA_1_FLAGS,
+ XE_GUC_REGISTER_CONTEXT_DATA_2_CONTEXT_INDEX,
+ XE_GUC_REGISTER_CONTEXT_DATA_3_ENGINE_CLASS,
+ XE_GUC_REGISTER_CONTEXT_DATA_4_ENGINE_SUBMIT_MASK,
+ XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER,
+ XE_GUC_REGISTER_CONTEXT_DATA_6_WQ_DESC_ADDR_UPPER,
+ XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER,
+ XE_GUC_REGISTER_CONTEXT_DATA_8_WQ_BUF_BASE_UPPER,
+ XE_GUC_REGISTER_CONTEXT_DATA_9_WQ_BUF_SIZE,
+ XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR,
+ XE_GUC_REGISTER_CONTEXT_MSG_LEN,
+};
+
+enum xe_guc_register_context_multi_lrc_param_offsets {
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_0_MBZ = 0,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_1_FLAGS,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_2_PARENT_CONTEXT,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_3_ENGINE_CLASS,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_4_ENGINE_SUBMIT_MASK,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_6_WQ_DESC_ADDR_UPPER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_8_WQ_BUF_BASE_UPPER,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_9_WQ_BUF_SIZE,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR,
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN = 11,
+};
+
enum xe_guc_report_status {
XE_GUC_REPORT_STATUS_UNKNOWN = 0x0,
XE_GUC_REPORT_STATUS_ACKED = 0x1,
diff --git a/drivers/gpu/drm/xe/abi/guc_errors_abi.h b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
index 2c627a21648f..ecf748fd87df 100644
--- a/drivers/gpu/drm/xe/abi/guc_errors_abi.h
+++ b/drivers/gpu/drm/xe/abi/guc_errors_abi.h
@@ -6,8 +6,7 @@
#ifndef _ABI_GUC_ERRORS_ABI_H
#define _ABI_GUC_ERRORS_ABI_H
-enum xe_guc_response_status {
- XE_GUC_RESPONSE_STATUS_SUCCESS = 0x0,
+enum xe_guc_response {
XE_GUC_RESPONSE_ERROR_PROTOCOL = 0x04,
XE_GUC_RESPONSE_INVALID_STATE = 0x0A,
XE_GUC_RESPONSE_UNSUPPORTED_VERSION = 0x0B,
@@ -21,12 +20,20 @@ enum xe_guc_response_status {
XE_GUC_RESPONSE_CANNOT_COMPLETE_ACTION = 0x41,
XE_GUC_RESPONSE_INVALID_KLV_DATA = 0x50,
XE_GUC_RESPONSE_INVALID_PARAMS = 0x60,
+ XE_GUC_RESPONSE_INVALID_CONTEXT_INDEX = 0x61,
+ XE_GUC_RESPONSE_INVALID_CONTEXT_REGISTRATION = 0x62,
+ XE_GUC_RESPONSE_INVALID_DOORBELL_ID = 0x63,
+ XE_GUC_RESPONSE_INVALID_ENGINE_ID = 0x64,
XE_GUC_RESPONSE_INVALID_BUFFER_RANGE = 0x70,
XE_GUC_RESPONSE_INVALID_BUFFER = 0x71,
+ XE_GUC_RESPONSE_BUFFER_ALREADY_REGISTERED = 0x72,
XE_GUC_RESPONSE_INVALID_GGTT_ADDRESS = 0x80,
XE_GUC_RESPONSE_PENDING_ACTION = 0x90,
+ XE_GUC_RESPONSE_CONTEXT_NOT_REGISTERED = 0x100,
+	XE_GUC_RESPONSE_CONTEXT_ALREADY_REGISTERED = 0x101,
XE_GUC_RESPONSE_INVALID_SIZE = 0x102,
XE_GUC_RESPONSE_MALFORMED_KLV = 0x103,
+ XE_GUC_RESPONSE_INVALID_CONTEXT = 0x104,
XE_GUC_RESPONSE_INVALID_KLV_KEY = 0x105,
XE_GUC_RESPONSE_DATA_TOO_LARGE = 0x106,
XE_GUC_RESPONSE_VF_MIGRATED = 0x107,
@@ -40,10 +47,11 @@ enum xe_guc_response_status {
XE_GUC_RESPONSE_CTB_NOT_REGISTERED = 0x304,
XE_GUC_RESPONSE_CTB_IN_USE = 0x305,
XE_GUC_RESPONSE_CTB_INVALID_DESC = 0x306,
+ XE_GUC_RESPONSE_HW_TIMEOUT = 0x30C,
XE_GUC_RESPONSE_CTB_SOURCE_INVALID_DESCRIPTOR = 0x30D,
XE_GUC_RESPONSE_CTB_DESTINATION_INVALID_DESCRIPTOR = 0x30E,
XE_GUC_RESPONSE_INVALID_CONFIG_STATE = 0x30F,
- XE_GUC_RESPONSE_STATUS_GENERIC_FAIL = 0xF000,
+ XE_GUC_RESPONSE_GENERIC_FAIL = 0xF000,
};
enum xe_guc_load_status {
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index 461ecdfdb742..6b362695d6b6 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -24,6 +24,7 @@ write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_
struct xe_device *xe = xe_bo_device(bo);
struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
u32 column, row;
+ u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
/* TODO: Maybe rewrite so we can traverse the bo addresses sequentially,
* by writing dpt/ggtt in a different order?
@@ -33,10 +34,9 @@ write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_
u32 src_idx = src_stride * (height - 1) + column + bo_ofs;
for (row = 0; row < height; row++) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]);
+ u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);
- iosys_map_wr(map, *dpt_ofs, u64, pte);
+ iosys_map_wr(map, *dpt_ofs, u64, pte | addr);
*dpt_ofs += 8;
src_idx -= src_stride;
}
@@ -56,17 +56,15 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
{
struct xe_device *xe = xe_bo_device(bo);
struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
- u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index)
- = ggtt->pt_ops->pte_encode_bo;
u32 column, row;
+ u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
for (row = 0; row < height; row++) {
u32 src_idx = src_stride * row + bo_ofs;
for (column = 0; column < width; column++) {
- iosys_map_wr(map, *dpt_ofs, u64,
- pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]));
+ u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);
+ iosys_map_wr(map, *dpt_ofs, u64, pte | addr);
*dpt_ofs += 8;
src_idx++;
@@ -130,13 +128,13 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb,
return PTR_ERR(dpt);
if (view->type == I915_GTT_VIEW_NORMAL) {
+ u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, xe->pat.idx[XE_CACHE_NONE]);
u32 x;
for (x = 0; x < size / XE_PAGE_SIZE; x++) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]);
+ u64 addr = xe_bo_addr(bo, x * XE_PAGE_SIZE, XE_PAGE_SIZE);
- iosys_map_wr(&dpt->vmap, x * 8, u64, pte);
+ iosys_map_wr(&dpt->vmap, x * 8, u64, pte | addr);
}
} else if (view->type == I915_GTT_VIEW_REMAPPED) {
const struct intel_remapped_info *remap_info = &view->remapped;
@@ -174,15 +172,15 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo
{
struct xe_device *xe = xe_bo_device(bo);
u32 column, row;
+ u64 pte = ggtt->pt_ops->pte_encode_flags(bo, xe->pat.idx[XE_CACHE_NONE]);
for (column = 0; column < width; column++) {
u32 src_idx = src_stride * (height - 1) + column + bo_ofs;
for (row = 0; row < height; row++) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
- xe->pat.idx[XE_CACHE_NONE]);
+ u64 addr = xe_bo_addr(bo, src_idx * XE_PAGE_SIZE, XE_PAGE_SIZE);
- ggtt->pt_ops->ggtt_set_pte(ggtt, *ggtt_ofs, pte);
+ ggtt->pt_ops->ggtt_set_pte(ggtt, *ggtt_ofs, pte | addr);
*ggtt_ofs += XE_PAGE_SIZE;
src_idx -= src_stride;
}
@@ -200,14 +198,15 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
struct drm_gem_object *obj = intel_fb_bo(&fb->base);
struct xe_bo *bo = gem_to_xe_bo(obj);
struct xe_device *xe = to_xe_device(fb->base.dev);
- struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt;
+ struct xe_tile *tile0 = xe_device_get_root_tile(xe);
+ struct xe_ggtt *ggtt = tile0->mem.ggtt;
u32 align;
int ret;
/* TODO: Consider sharing framebuffer mapping?
* embed i915_vma inside intel_framebuffer
*/
- xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_get_noresume(xe);
ret = mutex_lock_interruptible(&ggtt->lock);
if (ret)
goto out;
@@ -216,29 +215,22 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K)
align = max_t(u32, align, SZ_64K);
- if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) {
- vma->node = bo->ggtt_node[ggtt->tile->id];
+ if (bo->ggtt_node[tile0->id] && view->type == I915_GTT_VIEW_NORMAL) {
+ vma->node = bo->ggtt_node[tile0->id];
} else if (view->type == I915_GTT_VIEW_NORMAL) {
- u32 x, size = bo->ttm.base.size;
-
vma->node = xe_ggtt_node_init(ggtt);
if (IS_ERR(vma->node)) {
ret = PTR_ERR(vma->node);
goto out_unlock;
}
- ret = xe_ggtt_node_insert_locked(vma->node, size, align, 0);
+ ret = xe_ggtt_node_insert_locked(vma->node, bo->size, align, 0);
if (ret) {
xe_ggtt_node_fini(vma->node);
goto out_unlock;
}
- for (x = 0; x < size; x += XE_PAGE_SIZE) {
- u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x,
- xe->pat.idx[XE_CACHE_NONE]);
-
- ggtt->pt_ops->ggtt_set_pte(ggtt, vma->node->base.start + x, pte);
- }
+ xe_ggtt_map_bo(ggtt, vma->node, bo, xe->pat.idx[XE_CACHE_NONE]);
} else {
u32 i, ggtt_ofs;
const struct intel_rotation_info *rot_info = &view->rotated;
@@ -272,7 +264,7 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb,
out_unlock:
mutex_unlock(&ggtt->lock);
out:
- xe_pm_runtime_put(tile_to_xe(ggtt->tile));
+ xe_pm_runtime_put(xe);
return ret;
}
@@ -349,7 +341,7 @@ err:
static void __xe_unpin_fb_vma(struct i915_vma *vma)
{
- u8 tile_id = vma->node->ggtt->tile->id;
+ u8 tile_id = xe_device_get_root_tile(xe_bo_device(vma->bo))->id;
if (!refcount_dec_and_test(&vma->ref))
return;
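
The common pattern in the hunks above can be summarized by a small illustrative helper (write_linear_dpt() is a hypothetical name; xe_ggtt_encode_pte_flags(), xe_bo_addr() and iosys_map_wr() are the real calls used in the patch): the PTE flag bits are identical for every page of a BO, so they are computed once and only the per-page address is OR'ed in inside the loop.

static void write_linear_dpt(struct xe_ggtt *ggtt, struct xe_bo *bo,
			     struct iosys_map *map, u16 pat_index, u32 npages)
{
	/* Per-BO flag bits (caching, validity) computed once, outside the loop */
	u64 pte = xe_ggtt_encode_pte_flags(ggtt, bo, pat_index);
	u32 x;

	for (x = 0; x < npages; x++)
		iosys_map_wr(map, x * 8, u64,
			     pte | xe_bo_addr(bo, x * XE_PAGE_SIZE, XE_PAGE_SIZE));
}
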
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 413600a4fd21..af47ce34102c 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -88,12 +88,8 @@ initial_plane_bo(struct xe_device *xe,
base = round_down(plane_config->base, page_size);
if (IS_DGFX(xe)) {
- u64 __iomem *gte = tile0->mem.ggtt->gsm;
- u64 pte;
+ u64 pte = xe_ggtt_read_pte(tile0->mem.ggtt, base);
- gte += base / XE_PAGE_SIZE;
-
- pte = ioread64(gte);
if (!(pte & XE_GGTT_PTE_DM)) {
drm_err(&xe->drm,
"Initial plane programming missing DM bit\n");
diff --git a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
index 5394a1373a6b..ef2bf984723f 100644
--- a/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_mchbar_regs.h
@@ -40,6 +40,7 @@
#define PCU_CR_PACKAGE_RAPL_LIMIT XE_REG(MCHBAR_MIRROR_BASE_SNB + 0x59a0)
#define PWR_LIM_VAL REG_GENMASK(14, 0)
#define PWR_LIM_EN REG_BIT(15)
+#define PWR_LIM REG_GENMASK(15, 0)
#define PWR_LIM_TIME REG_GENMASK(23, 17)
#define PWR_LIM_TIME_X REG_GENMASK(23, 22)
#define PWR_LIM_TIME_Y REG_GENMASK(21, 17)
diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
index a79ad2da070c..e693a50706f8 100644
--- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
@@ -97,4 +97,7 @@
#define OAM_STATUS(base) XE_REG((base) + OAM_STATUS_OFFSET)
#define OAM_MMIO_TRG(base) XE_REG((base) + OAM_MMIO_TRG_OFFSET)
+#define OAM_COMPRESSION_T3_CONTROL XE_REG(0x1c2e00)
+#define OAM_LAT_MEASURE_ENABLE REG_BIT(4)
+
#endif
diff --git a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
index c556a04670ee..fb097607b86c 100644
--- a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h
@@ -18,12 +18,10 @@
#define PVC_GT0_PLATFORM_ENERGY_STATUS XE_REG(0x28106c)
#define PVC_GT0_PACKAGE_POWER_SKU XE_REG(0x281080)
-#define BMG_PACKAGE_ENERGY_STATUS XE_REG(0x138120)
#define BMG_FAN_1_SPEED XE_REG(0x138140)
#define BMG_FAN_2_SPEED XE_REG(0x138170)
#define BMG_FAN_3_SPEED XE_REG(0x1381a0)
#define BMG_VRAM_TEMPERATURE XE_REG(0x1382c0)
#define BMG_PACKAGE_TEMPERATURE XE_REG(0x138434)
-#define BMG_PLATFORM_ENERGY_STATUS XE_REG(0x138458)
#endif /* _XE_PCODE_REGS_H_ */
diff --git a/drivers/gpu/drm/xe/regs/xe_pmt.h b/drivers/gpu/drm/xe/regs/xe_pmt.h
index f45abcd96ba8..b0efd9b48d1e 100644
--- a/drivers/gpu/drm/xe/regs/xe_pmt.h
+++ b/drivers/gpu/drm/xe/regs/xe_pmt.h
@@ -10,6 +10,11 @@
#define BMG_PMT_BASE_OFFSET 0xDB000
#define BMG_DISCOVERY_OFFSET (SOC_BASE + BMG_PMT_BASE_OFFSET)
+#define PUNIT_TELEMETRY_GUID XE_REG(BMG_DISCOVERY_OFFSET + 0x4)
+#define BMG_ENERGY_STATUS_PMT_OFFSET (0x30)
+#define ENERGY_PKG REG_GENMASK64(31, 0)
+#define ENERGY_CARD REG_GENMASK64(63, 32)
+
#define BMG_TELEMETRY_BASE_OFFSET 0xE0000
#define BMG_TELEMETRY_OFFSET (SOC_BASE + BMG_TELEMETRY_BASE_OFFSET)
diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c
index 378dcd0fb414..77ca1ab527ec 100644
--- a/drivers/gpu/drm/xe/tests/xe_bo.c
+++ b/drivers/gpu/drm/xe/tests/xe_bo.c
@@ -514,9 +514,9 @@ static int shrink_test_run_device(struct xe_device *xe)
* other way around, they may not be subject to swapping...
*/
if (alloced < purgeable) {
- xe_ttm_tt_account_subtract(&xe_tt->ttm);
+ xe_ttm_tt_account_subtract(xe, &xe_tt->ttm);
xe_tt->purgeable = true;
- xe_ttm_tt_account_add(&xe_tt->ttm);
+ xe_ttm_tt_account_add(xe, &xe_tt->ttm);
bo->ttm.priority = 0;
spin_lock(&bo->ttm.bdev->lru_lock);
ttm_bo_move_to_lru_tail(&bo->ttm);
diff --git a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
index 6faffcd74869..537766cdd882 100644
--- a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
+++ b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c
@@ -42,10 +42,8 @@ static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device *
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bo->ggtt_node[tile->id]);
KUNIT_ASSERT_EQ(test, 0,
- drm_mm_insert_node_in_range(&ggtt->mm,
- &bo->ggtt_node[tile->id]->base,
- bo->size, SZ_4K,
- 0, 0, U64_MAX, 0));
+ xe_ggtt_node_insert(bo->ggtt_node[tile->id],
+ bo->size, SZ_4K));
}
return bo;
@@ -67,8 +65,9 @@ static int guc_buf_test_init(struct kunit *test)
ggtt = xe_device_get_root_tile(test->priv)->mem.ggtt;
guc = &xe_device_get_gt(test->priv, 0)->uc.guc;
- drm_mm_init(&ggtt->mm, DUT_GGTT_START, DUT_GGTT_SIZE);
- mutex_init(&ggtt->lock);
+ KUNIT_ASSERT_EQ(test, 0,
+ xe_ggtt_init_kunit(ggtt, DUT_GGTT_START,
+ DUT_GGTT_START + DUT_GGTT_SIZE));
kunit_activate_static_stub(test, xe_managed_bo_create_pin_map,
replacement_xe_managed_bo_create_pin_map);
diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c
index 1d3e2e50c355..baccb657bd05 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci.c
@@ -12,49 +12,48 @@
#include <kunit/test-bug.h>
#include <kunit/visibility.h>
+static void xe_ip_kunit_desc(const struct xe_ip *param, char *desc)
+{
+ snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%u.%02u %s",
+ param->verx100 / 100, param->verx100 % 100, param->name);
+}
+
+KUNIT_ARRAY_PARAM(graphics_ip, graphics_ips, xe_ip_kunit_desc);
+KUNIT_ARRAY_PARAM(media_ip, media_ips, xe_ip_kunit_desc);
+
/**
- * xe_call_for_each_graphics_ip - Iterate over all recognized graphics IPs
- * @xe_fn: Function to call for each device.
+ * xe_pci_graphics_ip_gen_param - Generate graphics struct xe_ip parameters
+ * @prev: the pointer to the previous parameter to iterate from or NULL
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
*
- * This function iterates over the descriptors for all graphics IPs recognized
- * by the driver and calls @xe_fn: for each one of them.
+ * This function prepares a struct xe_ip parameter.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
+ *
+ * Return: pointer to the next parameter or NULL if no more parameters
*/
-void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn)
+const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc)
{
- const struct xe_graphics_desc *desc, *last = NULL;
-
- for (int i = 0; i < ARRAY_SIZE(graphics_ips); i++) {
- desc = graphics_ips[i].desc;
- if (desc == last)
- continue;
-
- xe_fn(desc);
- last = desc;
- }
+ return graphics_ip_gen_params(prev, desc);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_graphics_ip);
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_graphics_ip_gen_param);
/**
- * xe_call_for_each_media_ip - Iterate over all recognized media IPs
- * @xe_fn: Function to call for each device.
+ * xe_pci_media_ip_gen_param - Generate media struct xe_ip parameters
+ * @prev: the pointer to the previous parameter to iterate from or NULL
+ * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE
+ *
+ * This function prepares a struct xe_ip parameter.
+ *
+ * To be used only as a parameter generator function in &KUNIT_CASE_PARAM.
*
- * This function iterates over the descriptors for all media IPs recognized
- * by the driver and calls @xe_fn: for each one of them.
+ * Return: pointer to the next parameter or NULL if no more parameters
*/
-void xe_call_for_each_media_ip(xe_media_fn xe_fn)
+const void *xe_pci_media_ip_gen_param(const void *prev, char *desc)
{
- const struct xe_media_desc *desc, *last = NULL;
-
- for (int i = 0; i < ARRAY_SIZE(media_ips); i++) {
- desc = media_ips[i].desc;
- if (desc == last)
- continue;
-
- xe_fn(desc);
- last = desc;
- }
+ return media_ip_gen_params(prev, desc);
}
-EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_media_ip);
+EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param);
static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type,
u32 *ver, u32 *revid)
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c
index 744a37583d2d..95fed41f7ff2 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.c
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c
@@ -14,9 +14,10 @@
#include "xe_pci_test.h"
#include "xe_pci_types.h"
-static void check_graphics_ip(const struct xe_graphics_desc *graphics)
+static void check_graphics_ip(struct kunit *test)
{
- struct kunit *test = kunit_get_current_test();
+ const struct xe_ip *param = test->param_value;
+ const struct xe_graphics_desc *graphics = param->desc;
u64 mask = graphics->hw_engine_mask;
/* RCS, CCS, and BCS engines are allowed on the graphics IP */
@@ -28,9 +29,10 @@ static void check_graphics_ip(const struct xe_graphics_desc *graphics)
KUNIT_ASSERT_EQ(test, mask, 0);
}
-static void check_media_ip(const struct xe_media_desc *media)
+static void check_media_ip(struct kunit *test)
{
- struct kunit *test = kunit_get_current_test();
+ const struct xe_ip *param = test->param_value;
+ const struct xe_media_desc *media = param->desc;
u64 mask = media->hw_engine_mask;
/* VCS, VECS and GSCCS engines are allowed on the media IP */
@@ -42,19 +44,9 @@ static void check_media_ip(const struct xe_media_desc *media)
KUNIT_ASSERT_EQ(test, mask, 0);
}
-static void xe_gmdid_graphics_ip(struct kunit *test)
-{
- xe_call_for_each_graphics_ip(check_graphics_ip);
-}
-
-static void xe_gmdid_media_ip(struct kunit *test)
-{
- xe_call_for_each_media_ip(check_media_ip);
-}
-
static struct kunit_case xe_pci_tests[] = {
- KUNIT_CASE(xe_gmdid_graphics_ip),
- KUNIT_CASE(xe_gmdid_media_ip),
+ KUNIT_CASE_PARAM(check_graphics_ip, xe_pci_graphics_ip_gen_param),
+ KUNIT_CASE_PARAM(check_media_ip, xe_pci_media_ip_gen_param),
{}
};
diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h
index ede46800aff1..3a1df7a5e291 100644
--- a/drivers/gpu/drm/xe/tests/xe_pci_test.h
+++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h
@@ -12,15 +12,6 @@
#include "xe_sriov_types.h"
struct xe_device;
-struct xe_graphics_desc;
-struct xe_media_desc;
-
-typedef int (*xe_device_fn)(struct xe_device *);
-typedef void (*xe_graphics_fn)(const struct xe_graphics_desc *);
-typedef void (*xe_media_fn)(const struct xe_media_desc *);
-
-void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn);
-void xe_call_for_each_media_ip(xe_media_fn xe_fn);
struct xe_pci_fake_data {
enum xe_sriov_mode sriov_mode;
@@ -34,6 +25,8 @@ struct xe_pci_fake_data {
int xe_pci_fake_device_init(struct xe_device *xe);
+const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc);
+const void *xe_pci_media_ip_gen_param(const void *prev, char *desc);
const void *xe_pci_live_device_gen_param(const void *prev, char *desc);
#endif
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 7aa2c17825da..4e39188a021a 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -336,15 +336,13 @@ static void xe_evict_flags(struct ttm_buffer_object *tbo,
/* struct xe_ttm_tt - Subclassed ttm_tt for xe */
struct xe_ttm_tt {
struct ttm_tt ttm;
- /** @xe - The xe device */
- struct xe_device *xe;
struct sg_table sgt;
struct sg_table *sg;
/** @purgeable: Whether the content of the pages of @ttm is purgeable. */
bool purgeable;
};
-static int xe_tt_map_sg(struct ttm_tt *tt)
+static int xe_tt_map_sg(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
unsigned long num_pages = tt->num_pages;
@@ -359,13 +357,13 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
ret = sg_alloc_table_from_pages_segment(&xe_tt->sgt, tt->pages,
num_pages, 0,
(u64)num_pages << PAGE_SHIFT,
- xe_sg_segment_size(xe_tt->xe->drm.dev),
+ xe_sg_segment_size(xe->drm.dev),
GFP_KERNEL);
if (ret)
return ret;
xe_tt->sg = &xe_tt->sgt;
- ret = dma_map_sgtable(xe_tt->xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
+ ret = dma_map_sgtable(xe->drm.dev, xe_tt->sg, DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC);
if (ret) {
sg_free_table(xe_tt->sg);
@@ -376,12 +374,12 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
return 0;
}
-static void xe_tt_unmap_sg(struct ttm_tt *tt)
+static void xe_tt_unmap_sg(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
if (xe_tt->sg) {
- dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg,
+ dma_unmap_sgtable(xe->drm.dev, xe_tt->sg,
DMA_BIDIRECTIONAL, 0);
sg_free_table(xe_tt->sg);
xe_tt->sg = NULL;
@@ -400,24 +398,24 @@ struct sg_table *xe_bo_sg(struct xe_bo *bo)
* Account ttm pages against the device shrinker's shrinkable and
* purgeable counts.
*/
-static void xe_ttm_tt_account_add(struct ttm_tt *tt)
+static void xe_ttm_tt_account_add(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
if (xe_tt->purgeable)
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, tt->num_pages);
+ xe_shrinker_mod_pages(xe->mem.shrinker, 0, tt->num_pages);
else
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, tt->num_pages, 0);
+ xe_shrinker_mod_pages(xe->mem.shrinker, tt->num_pages, 0);
}
-static void xe_ttm_tt_account_subtract(struct ttm_tt *tt)
+static void xe_ttm_tt_account_subtract(struct xe_device *xe, struct ttm_tt *tt)
{
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
if (xe_tt->purgeable)
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, 0, -(long)tt->num_pages);
+ xe_shrinker_mod_pages(xe->mem.shrinker, 0, -(long)tt->num_pages);
else
- xe_shrinker_mod_pages(xe_tt->xe->mem.shrinker, -(long)tt->num_pages, 0);
+ xe_shrinker_mod_pages(xe->mem.shrinker, -(long)tt->num_pages, 0);
}
static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
@@ -436,7 +434,6 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
return NULL;
tt = &xe_tt->ttm;
- xe_tt->xe = xe;
extra_pages = 0;
if (xe_bo_needs_ccs_pages(bo))
@@ -527,21 +524,23 @@ static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt,
return err;
xe_tt->purgeable = false;
- xe_ttm_tt_account_add(tt);
+ xe_ttm_tt_account_add(ttm_to_xe_device(ttm_dev), tt);
return 0;
}
static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt)
{
+ struct xe_device *xe = ttm_to_xe_device(ttm_dev);
+
if ((tt->page_flags & TTM_TT_FLAG_EXTERNAL) &&
!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE))
return;
- xe_tt_unmap_sg(tt);
+ xe_tt_unmap_sg(xe, tt);
ttm_pool_free(&ttm_dev->pool, tt);
- xe_ttm_tt_account_subtract(tt);
+ xe_ttm_tt_account_subtract(xe, tt);
}
static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt)
@@ -789,7 +788,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
/* Bo creation path, moving to system or TT. */
if ((!old_mem && ttm) && !handle_system_ccs) {
if (new_mem->mem_type == XE_PL_TT)
- ret = xe_tt_map_sg(ttm);
+ ret = xe_tt_map_sg(xe, ttm);
if (!ret)
ttm_bo_move_null(ttm_bo, new_mem);
goto out;
@@ -812,7 +811,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
(!ttm && ttm_bo->type == ttm_bo_type_device);
if (new_mem->mem_type == XE_PL_TT) {
- ret = xe_tt_map_sg(ttm);
+ ret = xe_tt_map_sg(xe, ttm);
if (ret)
goto out;
}
@@ -958,7 +957,7 @@ out:
if (timeout < 0)
ret = timeout;
- xe_tt_unmap_sg(ttm_bo->ttm);
+ xe_tt_unmap_sg(xe, ttm_bo->ttm);
}
return ret;
@@ -968,6 +967,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
struct ttm_buffer_object *bo,
unsigned long *scanned)
{
+ struct xe_device *xe = ttm_to_xe_device(bo->bdev);
long lret;
/* Fake move to system, without copying data. */
@@ -982,7 +982,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
if (lret)
return lret;
- xe_tt_unmap_sg(bo->ttm);
+ xe_tt_unmap_sg(xe, bo->ttm);
ttm_bo_move_null(bo, new_resource);
}
@@ -993,7 +993,7 @@ static long xe_bo_shrink_purge(struct ttm_operation_ctx *ctx,
.allow_move = false});
if (lret > 0)
- xe_ttm_tt_account_subtract(bo->ttm);
+ xe_ttm_tt_account_subtract(xe, bo->ttm);
return lret;
}
@@ -1043,7 +1043,7 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
struct xe_ttm_tt *xe_tt = container_of(tt, struct xe_ttm_tt, ttm);
struct ttm_place place = {.mem_type = bo->resource->mem_type};
struct xe_bo *xe_bo = ttm_to_xe_bo(bo);
- struct xe_device *xe = xe_tt->xe;
+ struct xe_device *xe = ttm_to_xe_device(bo->bdev);
bool needs_rpm;
long lret = 0L;
@@ -1080,7 +1080,7 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo,
xe_pm_runtime_put(xe);
if (lret > 0)
- xe_ttm_tt_account_subtract(tt);
+ xe_ttm_tt_account_subtract(xe, tt);
out_unref:
xe_bo_put(xe_bo);
@@ -1381,7 +1381,8 @@ int xe_bo_dma_unmap_pinned(struct xe_bo *bo)
ttm_bo->sg = NULL;
xe_tt->sg = NULL;
} else if (xe_tt->sg) {
- dma_unmap_sgtable(xe_tt->xe->drm.dev, xe_tt->sg,
+ dma_unmap_sgtable(ttm_to_xe_device(ttm_bo->bdev)->drm.dev,
+ xe_tt->sg,
DMA_BIDIRECTIONAL, 0);
sg_free_table(xe_tt->sg);
xe_tt->sg = NULL;
@@ -2293,7 +2294,7 @@ int xe_bo_pin_external(struct xe_bo *bo)
ttm_bo_pin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_subtract(bo->ttm.ttm);
+ xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2341,7 +2342,7 @@ int xe_bo_pin(struct xe_bo *bo)
ttm_bo_pin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_subtract(bo->ttm.ttm);
+ xe_ttm_tt_account_subtract(xe, bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2377,7 +2378,7 @@ void xe_bo_unpin_external(struct xe_bo *bo)
ttm_bo_unpin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_add(bo->ttm.ttm);
+ xe_ttm_tt_account_add(xe, bo->ttm.ttm);
/*
* FIXME: If we always use the reserve / unreserve functions for locking
@@ -2409,7 +2410,7 @@ void xe_bo_unpin(struct xe_bo *bo)
}
ttm_bo_unpin(&bo->ttm);
if (bo->ttm.ttm && ttm_tt_is_populated(bo->ttm.ttm))
- xe_ttm_tt_account_add(bo->ttm.ttm);
+ xe_ttm_tt_account_add(xe, bo->ttm.ttm);
}
/**
@@ -2992,6 +2993,14 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo)
if (IS_DGFX(xe) && (bo->flags & XE_BO_FLAG_SYSTEM))
return false;
+ /*
+ * Compression implies coh_none, therefore we know for sure that WB
+ * memory can't currently use compression, which is likely one of the
+ * common cases.
+ */
+ if (bo->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)
+ return false;
+
return true;
}
@@ -3067,7 +3076,7 @@ void xe_bo_put(struct xe_bo *bo)
#endif
for_each_tile(tile, xe_bo_device(bo), id)
if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt)
- might_lock(&bo->ggtt_node[id]->ggtt->lock);
+ xe_ggtt_might_lock(bo->ggtt_node[id]->ggtt);
drm_gem_object_put(&bo->ttm.base);
}
}
diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c
index ed3746d32b27..7484ce55a303 100644
--- a/drivers/gpu/drm/xe/xe_bo_evict.c
+++ b/drivers/gpu/drm/xe/xe_bo_evict.c
@@ -197,9 +197,7 @@ static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo)
if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile)))
continue;
- mutex_lock(&tile->mem.ggtt->lock);
- xe_ggtt_map_bo(tile->mem.ggtt, bo);
- mutex_unlock(&tile->mem.ggtt->lock);
+ xe_ggtt_map_bo_unlocked(tile->mem.ggtt, bo);
}
}
diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
index cb9f175c89a1..8ec1ff1e4e80 100644
--- a/drivers/gpu/drm/xe/xe_configfs.c
+++ b/drivers/gpu/drm/xe/xe_configfs.c
@@ -3,14 +3,19 @@
* Copyright © 2025 Intel Corporation
*/
+#include <linux/bitops.h>
#include <linux/configfs.h>
+#include <linux/find.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/string.h>
#include "xe_configfs.h"
#include "xe_module.h"
+#include "xe_hw_engine_types.h"
+
/**
* DOC: Xe Configfs
*
@@ -48,6 +53,30 @@
* # echo 1 > /sys/kernel/config/xe/0000:03:00.0/survivability_mode
* # echo 0000:03:00.0 > /sys/bus/pci/drivers/xe/bind (Enters survivability mode if supported)
*
+ * Allowed engines:
+ * ----------------
+ *
+ * Allow only a set of engine(s) to be available, disabling the other engines
+ * even if they are available in hardware. This is applied after HW fuses are
+ * considered on each tile. Examples:
+ *
+ * Allow only one render and one copy engine, nothing else::
+ *
+ * # echo 'rcs0,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
+ *
+ * Allow only compute engines and first copy engine::
+ *
+ * # echo 'ccs*,bcs0' > /sys/kernel/config/xe/0000:03:00.0/engines_allowed
+ *
+ * Note that the engine names are the per-GT hardware names. On multi-tile
+ * platforms, writing ``rcs0,bcs0`` to this file would allow the first render
+ * and copy engines on each tile.
+ *
+ * The requested configuration may not be supported by the platform, in which
+ * case the driver may fail to probe: for example, if at least one copy engine
+ * is expected to be available for migrations but all copy engines are
+ * disabled. This is intended for debugging purposes only.
+ *
* Remove devices
* ==============
*
@@ -60,11 +89,30 @@ struct xe_config_device {
struct config_group group;
bool survivability_mode;
+ u64 engines_allowed;
/* protects attributes */
struct mutex lock;
};
+struct engine_info {
+ const char *cls;
+ u64 mask;
+};
+
+/* Some helpful macros to aid on the sizing of buffer allocation when parsing */
+#define MAX_ENGINE_CLASS_CHARS 5
+#define MAX_ENGINE_INSTANCE_CHARS 2
+
+static const struct engine_info engine_info[] = {
+ { .cls = "rcs", .mask = XE_HW_ENGINE_RCS_MASK },
+ { .cls = "bcs", .mask = XE_HW_ENGINE_BCS_MASK },
+ { .cls = "vcs", .mask = XE_HW_ENGINE_VCS_MASK },
+ { .cls = "vecs", .mask = XE_HW_ENGINE_VECS_MASK },
+ { .cls = "ccs", .mask = XE_HW_ENGINE_CCS_MASK },
+ { .cls = "gsccs", .mask = XE_HW_ENGINE_GSCCS_MASK },
+};
+
static struct xe_config_device *to_xe_config_device(struct config_item *item)
{
return container_of(to_config_group(item), struct xe_config_device, group);
@@ -94,10 +142,96 @@ static ssize_t survivability_mode_store(struct config_item *item, const char *pa
return len;
}
+static ssize_t engines_allowed_show(struct config_item *item, char *page)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+ char *p = page;
+
+ for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
+ u64 mask = engine_info[i].mask;
+
+ if ((dev->engines_allowed & mask) == mask) {
+ p += sprintf(p, "%s*\n", engine_info[i].cls);
+ } else if (mask & dev->engines_allowed) {
+ u16 bit0 = __ffs64(mask), bit;
+
+ mask &= dev->engines_allowed;
+
+ for_each_set_bit(bit, (const unsigned long *)&mask, 64)
+ p += sprintf(p, "%s%u\n", engine_info[i].cls,
+ bit - bit0);
+ }
+ }
+
+ return p - page;
+}
+
+static bool lookup_engine_mask(const char *pattern, u64 *mask)
+{
+ for (size_t i = 0; i < ARRAY_SIZE(engine_info); i++) {
+ u8 instance;
+ u16 bit;
+
+ if (!str_has_prefix(pattern, engine_info[i].cls))
+ continue;
+
+ pattern += strlen(engine_info[i].cls);
+
+ if (!strcmp(pattern, "*")) {
+ *mask = engine_info[i].mask;
+ return true;
+ }
+
+ if (kstrtou8(pattern, 10, &instance))
+ return false;
+
+ bit = __ffs64(engine_info[i].mask) + instance;
+ if (bit >= fls64(engine_info[i].mask))
+ return false;
+
+ *mask = BIT_ULL(bit);
+ return true;
+ }
+
+ return false;
+}
+
+static ssize_t engines_allowed_store(struct config_item *item, const char *page,
+ size_t len)
+{
+ struct xe_config_device *dev = to_xe_config_device(item);
+ size_t patternlen, p;
+ u64 mask, val = 0;
+
+ for (p = 0; p < len; p += patternlen + 1) {
+ char buf[MAX_ENGINE_CLASS_CHARS + MAX_ENGINE_INSTANCE_CHARS + 1];
+
+ patternlen = strcspn(page + p, ",\n");
+ if (patternlen >= sizeof(buf))
+ return -EINVAL;
+
+ memcpy(buf, page + p, patternlen);
+ buf[patternlen] = '\0';
+
+ if (!lookup_engine_mask(buf, &mask))
+ return -EINVAL;
+
+ val |= mask;
+ }
+
+ mutex_lock(&dev->lock);
+ dev->engines_allowed = val;
+ mutex_unlock(&dev->lock);
+
+ return len;
+}
+
CONFIGFS_ATTR(, survivability_mode);
+CONFIGFS_ATTR(, engines_allowed);
static struct configfs_attribute *xe_config_device_attrs[] = {
&attr_survivability_mode,
+ &attr_engines_allowed,
NULL,
};
@@ -139,6 +273,9 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro
if (!dev)
return ERR_PTR(-ENOMEM);
+ /* Default values */
+ dev->engines_allowed = U64_MAX;
+
config_group_init_type_name(&dev->group, name, &xe_config_device_type);
mutex_init(&dev->lock);
@@ -226,6 +363,29 @@ void xe_configfs_clear_survivability_mode(struct pci_dev *pdev)
config_item_put(&dev->group.cg_item);
}
+/**
+ * xe_configfs_get_engines_allowed - get engine allowed mask from configfs
+ * @pdev: pci device
+ *
+ * Find the configfs group that belongs to the pci device and return
+ * the mask of engines allowed to be used.
+ *
+ * Return: engine mask with allowed engines
+ */
+u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev)
+{
+ struct xe_config_device *dev = configfs_find_group(pdev);
+ u64 engines_allowed;
+
+ if (!dev)
+ return U64_MAX;
+
+ engines_allowed = dev->engines_allowed;
+ config_item_put(&dev->group.cg_item);
+
+ return engines_allowed;
+}
+
int __init xe_configfs_init(void)
{
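
A hypothetical consumer of the new helper could look like the sketch below (compute_enabled_engines() is a made-up name; only xe_configfs_get_engines_allowed() is the real function added above). The fused-in engine mask is ANDed with the configfs allow-list, so an absent configfs group (which returns U64_MAX) leaves the hardware configuration untouched:

#include <linux/pci.h>

#include "xe_configfs.h"

static u64 compute_enabled_engines(struct pci_dev *pdev, u64 fuse_mask)
{
	/* U64_MAX is returned when no configfs group exists for the device */
	u64 allowed = xe_configfs_get_engines_allowed(pdev);

	return fuse_mask & allowed;
}
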
struct config_group *root = &xe_configfs.su_group;
diff --git a/drivers/gpu/drm/xe/xe_configfs.h b/drivers/gpu/drm/xe/xe_configfs.h
index d7d041ec2611..fb8764008089 100644
--- a/drivers/gpu/drm/xe/xe_configfs.h
+++ b/drivers/gpu/drm/xe/xe_configfs.h
@@ -5,6 +5,7 @@
#ifndef _XE_CONFIGFS_H_
#define _XE_CONFIGFS_H_
+#include <linux/limits.h>
#include <linux/types.h>
struct pci_dev;
@@ -14,11 +15,13 @@ int xe_configfs_init(void);
void xe_configfs_exit(void);
bool xe_configfs_get_survivability_mode(struct pci_dev *pdev);
void xe_configfs_clear_survivability_mode(struct pci_dev *pdev);
+u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev);
#else
-static inline int xe_configfs_init(void) { return 0; };
-static inline void xe_configfs_exit(void) {};
-static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; };
-static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) {};
+static inline int xe_configfs_init(void) { return 0; }
+static inline void xe_configfs_exit(void) { }
+static inline bool xe_configfs_get_survivability_mode(struct pci_dev *pdev) { return false; }
+static inline void xe_configfs_clear_survivability_mode(struct pci_dev *pdev) { }
+static inline u64 xe_configfs_get_engines_allowed(struct pci_dev *pdev) { return U64_MAX; }
#endif
#endif
diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c
index d0503959a8ed..d83cd6ed3fa8 100644
--- a/drivers/gpu/drm/xe/xe_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_debugfs.c
@@ -191,6 +191,41 @@ static const struct file_operations wedged_mode_fops = {
.write = wedged_mode_set,
};
+static ssize_t atomic_svm_timeslice_ms_show(struct file *f, char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ char buf[32];
+ int len = 0;
+
+ len = scnprintf(buf, sizeof(buf), "%d\n", xe->atomic_svm_timeslice_ms);
+
+ return simple_read_from_buffer(ubuf, size, pos, buf, len);
+}
+
+static ssize_t atomic_svm_timeslice_ms_set(struct file *f,
+ const char __user *ubuf,
+ size_t size, loff_t *pos)
+{
+ struct xe_device *xe = file_inode(f)->i_private;
+ u32 atomic_svm_timeslice_ms;
+ ssize_t ret;
+
+ ret = kstrtouint_from_user(ubuf, size, 0, &atomic_svm_timeslice_ms);
+ if (ret)
+ return ret;
+
+ xe->atomic_svm_timeslice_ms = atomic_svm_timeslice_ms;
+
+ return size;
+}
+
+static const struct file_operations atomic_svm_timeslice_ms_fops = {
+ .owner = THIS_MODULE,
+ .read = atomic_svm_timeslice_ms_show,
+ .write = atomic_svm_timeslice_ms_set,
+};
+
void xe_debugfs_register(struct xe_device *xe)
{
struct ttm_device *bdev = &xe->ttm;
@@ -211,6 +246,9 @@ void xe_debugfs_register(struct xe_device *xe)
debugfs_create_file("wedged_mode", 0600, root, xe,
&wedged_mode_fops);
+ debugfs_create_file("atomic_svm_timeslice_ms", 0600, root, xe,
+ &atomic_svm_timeslice_ms_fops);
+
for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
man = ttm_manager_type(bdev, mem_type);
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index b615d8ed38a2..8cfcfff250ca 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -402,9 +402,6 @@ static void xe_device_destroy(struct drm_device *dev, void *dummy)
if (xe->unordered_wq)
destroy_workqueue(xe->unordered_wq);
- if (!IS_ERR_OR_NULL(xe->mem.shrinker))
- xe_shrinker_destroy(xe->mem.shrinker);
-
if (xe->destroy_wq)
destroy_workqueue(xe->destroy_wq);
@@ -438,13 +435,14 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
if (err)
goto err;
- xe->mem.shrinker = xe_shrinker_create(xe);
- if (IS_ERR(xe->mem.shrinker))
- return ERR_CAST(xe->mem.shrinker);
+ err = xe_shrinker_create(xe);
+ if (err)
+ goto err;
xe->info.devid = pdev->device;
xe->info.revid = pdev->revision;
xe->info.force_execlist = xe_modparam.force_execlist;
+ xe->atomic_svm_timeslice_ms = 5;
err = xe_irq_init(xe);
if (err)
@@ -800,18 +798,19 @@ int xe_device_probe(struct xe_device *xe)
* be performed.
*/
xe_gt_mmio_init(gt);
- }
- for_each_tile(tile, xe, id) {
if (IS_SRIOV_VF(xe)) {
- xe_guc_comm_init_early(&tile->primary_gt->uc.guc);
- err = xe_gt_sriov_vf_bootstrap(tile->primary_gt);
+ xe_guc_comm_init_early(&gt->uc.guc);
+ err = xe_gt_sriov_vf_bootstrap(gt);
if (err)
return err;
- err = xe_gt_sriov_vf_query_config(tile->primary_gt);
+ err = xe_gt_sriov_vf_query_config(gt);
if (err)
return err;
}
+ }
+
+ for_each_tile(tile, xe, id) {
err = xe_ggtt_init_early(tile->mem.ggtt);
if (err)
return err;
@@ -1164,7 +1163,8 @@ void xe_device_declare_wedged(struct xe_device *xe)
/* Notify userspace of wedged device */
drm_dev_wedged_event(&xe->drm,
- DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET);
+ DRM_WEDGE_RECOVERY_REBIND | DRM_WEDGE_RECOVERY_BUS_RESET,
+ NULL);
}
for_each_gt(gt, xe, id)
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index 0bc3bc8e6803..e4da797a984b 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -195,6 +195,8 @@ void xe_device_declare_wedged(struct xe_device *xe);
struct xe_file *xe_file_get(struct xe_file *xef);
void xe_file_put(struct xe_file *xef);
+int xe_is_injection_active(void);
+
/*
* Occasionally it is seen that the G2H worker starts running after a delay of more than
* a second even after being queued and activated by the Linux workqueue subsystem. This
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 12e6f98fea6a..003afb279a5e 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -499,6 +499,10 @@ struct xe_device {
const struct xe_pat_table_entry *table;
/** @pat.n_entries: Number of PAT entries */
int n_entries;
+ /** @pat.pat_ats: PAT entry for PCIe ATS responses */
+ const struct xe_pat_table_entry *pat_ats;
+ /** @pat.pat_pta: PAT entry for page table accesses */
+ const struct xe_pat_table_entry *pat_pta;
u32 idx[__XE_CACHE_LEVEL_COUNT];
} pat;
@@ -573,6 +577,9 @@ struct xe_device {
/** @pmu: performance monitoring unit */
struct xe_pmu pmu;
+ /** @atomic_svm_timeslice_ms: Atomic SVM fault timeslice MS */
+ u32 atomic_svm_timeslice_ms;
+
#ifdef TEST_VM_OPS_ERROR
/**
* @vm_inject_error_position: inject errors at different places in VM
diff --git a/drivers/gpu/drm/xe/xe_drv.h b/drivers/gpu/drm/xe/xe_drv.h
index d61650d4aa0b..95242a375e54 100644
--- a/drivers/gpu/drm/xe/xe_drv.h
+++ b/drivers/gpu/drm/xe/xe_drv.h
@@ -9,7 +9,7 @@
#include <drm/drm_drv.h>
#define DRIVER_NAME "xe"
-#define DRIVER_DESC "Intel Xe Graphics"
+#define DRIVER_DESC "Intel Xe2 Graphics"
/* Interface history:
*
diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
index 7062115909f2..7b11fa1356f0 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.c
+++ b/drivers/gpu/drm/xe/xe_ggtt.c
@@ -5,6 +5,7 @@
#include "xe_ggtt.h"
+#include <kunit/visibility.h>
#include <linux/fault-inject.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/sizes.h>
@@ -22,12 +23,13 @@
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
-#include "xe_gt_sriov_vf.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
+#include "xe_res_cursor.h"
#include "xe_sriov.h"
+#include "xe_tile_sriov_vf.h"
#include "xe_wa.h"
#include "xe_wopcm.h"
@@ -64,13 +66,9 @@
* give us the correct placement for free.
*/
-static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
- u16 pat_index)
+static u64 xelp_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
{
- u64 pte;
-
- pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
- pte |= XE_PAGE_PRESENT;
+ u64 pte = XE_PAGE_PRESENT;
if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
pte |= XE_GGTT_PTE_DM;
@@ -78,13 +76,12 @@ static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
return pte;
}
-static u64 xelpg_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
- u16 pat_index)
+static u64 xelpg_ggtt_pte_flags(struct xe_bo *bo, u16 pat_index)
{
struct xe_device *xe = xe_bo_device(bo);
u64 pte;
- pte = xelp_ggtt_pte_encode_bo(bo, bo_offset, pat_index);
+ pte = xelp_ggtt_pte_flags(bo, pat_index);
xe_assert(xe, pat_index <= 3);
@@ -149,8 +146,9 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
xe_tile_assert(ggtt->tile, start < end);
if (ggtt->scratch)
- scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0,
- pat_index);
+ scratch_pte = xe_bo_addr(ggtt->scratch, 0, XE_PAGE_SIZE) |
+ ggtt->pt_ops->pte_encode_flags(ggtt->scratch,
+ pat_index);
else
scratch_pte = 0;
@@ -160,6 +158,22 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
}
}
+/**
+ * xe_ggtt_alloc - Allocate a GGTT for a given &xe_tile
+ * @tile: &xe_tile
+ *
+ * Allocates a &xe_ggtt for a given tile.
+ *
+ * Return: &xe_ggtt on success, or NULL when out of memory.
+ */
+struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile)
+{
+ struct xe_ggtt *ggtt = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*ggtt), GFP_KERNEL);
+ if (ggtt)
+ ggtt->tile = tile;
+ return ggtt;
+}
+
static void ggtt_fini_early(struct drm_device *drm, void *arg)
{
struct xe_ggtt *ggtt = arg;
@@ -176,6 +190,13 @@ static void ggtt_fini(void *arg)
ggtt->scratch = NULL;
}
+#ifdef CONFIG_LOCKDEP
+void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
+{
+ might_lock(&ggtt->lock);
+}
+#endif
+
static void primelockdep(struct xe_ggtt *ggtt)
{
if (!IS_ENABLED(CONFIG_LOCKDEP))
@@ -187,20 +208,36 @@ static void primelockdep(struct xe_ggtt *ggtt)
}
static const struct xe_ggtt_pt_ops xelp_pt_ops = {
- .pte_encode_bo = xelp_ggtt_pte_encode_bo,
+ .pte_encode_flags = xelp_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte,
};
static const struct xe_ggtt_pt_ops xelpg_pt_ops = {
- .pte_encode_bo = xelpg_ggtt_pte_encode_bo,
+ .pte_encode_flags = xelpg_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte,
};
static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
- .pte_encode_bo = xelpg_ggtt_pte_encode_bo,
+ .pte_encode_flags = xelpg_ggtt_pte_flags,
.ggtt_set_pte = xe_ggtt_set_pte_and_flush,
};
+static void __xe_ggtt_init_early(struct xe_ggtt *ggtt, u32 reserved)
+{
+ drm_mm_init(&ggtt->mm, reserved,
+ ggtt->size - reserved);
+ mutex_init(&ggtt->lock);
+ primelockdep(ggtt);
+}
+
+int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size)
+{
+ ggtt->size = size;
+ __xe_ggtt_init_early(ggtt, reserved);
+ return 0;
+}
+EXPORT_SYMBOL_IF_KUNIT(xe_ggtt_init_kunit);
+
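xe_ggtt_init_kunit() exists so a test can exercise the drm_mm bookkeeping without a device or a real GSM behind it. A minimal sketch of such a case; the sizes and test name are made up here and the in-tree selftest may differ:

    static void ggtt_kunit_example(struct kunit *test)
    {
    	struct xe_ggtt *ggtt = kunit_kzalloc(test, sizeof(*ggtt), GFP_KERNEL);

    	KUNIT_ASSERT_NOT_NULL(test, ggtt);

    	/* 8 MiB of GGTT with the first 1 MiB treated as reserved (WOPCM-like) */
    	KUNIT_ASSERT_EQ(test, 0, xe_ggtt_init_kunit(ggtt, SZ_1M, SZ_8M));
    	KUNIT_EXPECT_EQ(test, ggtt->size, (u64)SZ_8M);
    }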
/**
* xe_ggtt_init_early - Early GGTT initialization
* @ggtt: the &xe_ggtt to be initialized
@@ -219,7 +256,7 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
unsigned int gsm_size;
int err;
- if (IS_SRIOV_VF(xe))
+ if (IS_SRIOV_VF(xe) || GRAPHICS_VERx100(xe) >= 1250)
gsm_size = SZ_8M; /* GGTT is expected to be 4GiB */
else
gsm_size = probe_gsm_size(pdev);
@@ -247,18 +284,14 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
ggtt->pt_ops = &xelp_pt_ops;
ggtt->wq = alloc_workqueue("xe-ggtt-wq", 0, WQ_MEM_RECLAIM);
-
- drm_mm_init(&ggtt->mm, xe_wopcm_size(xe),
- ggtt->size - xe_wopcm_size(xe));
- mutex_init(&ggtt->lock);
- primelockdep(ggtt);
+ __xe_ggtt_init_early(ggtt, xe_wopcm_size(xe));
err = drmm_add_action_or_reset(&xe->drm, ggtt_fini_early, ggtt);
if (err)
return err;
if (IS_SRIOV_VF(xe)) {
- err = xe_gt_sriov_vf_prepare_ggtt(xe_tile_get_gt(ggtt->tile, 0));
+ err = xe_tile_sriov_vf_prepare_ggtt(ggtt->tile);
if (err)
return err;
}
@@ -429,16 +462,17 @@ static void xe_ggtt_dump_node(struct xe_ggtt *ggtt,
}
/**
- * xe_ggtt_node_insert_balloon - prevent allocation of specified GGTT addresses
+ * xe_ggtt_node_insert_balloon_locked - prevent allocation of specified GGTT addresses
* @node: the &xe_ggtt_node to hold reserved GGTT node
* @start: the starting GGTT address of the reserved region
* @end: the end GGTT address of the reserved region
*
- * Use xe_ggtt_node_remove_balloon() to release a reserved GGTT node.
+ * To be used in cases where ggtt->lock is already taken.
+ * Use xe_ggtt_node_remove_balloon_locked() to release a reserved GGTT node.
*
* Return: 0 on success or a negative error code on failure.
*/
-int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
+int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node, u64 start, u64 end)
{
struct xe_ggtt *ggtt = node->ggtt;
int err;
@@ -447,14 +481,13 @@ int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
xe_tile_assert(ggtt->tile, IS_ALIGNED(start, XE_PAGE_SIZE));
xe_tile_assert(ggtt->tile, IS_ALIGNED(end, XE_PAGE_SIZE));
xe_tile_assert(ggtt->tile, !drm_mm_node_allocated(&node->base));
+ lockdep_assert_held(&ggtt->lock);
node->base.color = 0;
node->base.start = start;
node->base.size = end - start;
- mutex_lock(&ggtt->lock);
err = drm_mm_reserve_node(&ggtt->mm, &node->base);
- mutex_unlock(&ggtt->lock);
if (xe_gt_WARN(ggtt->tile->primary_gt, err,
"Failed to balloon GGTT %#llx-%#llx (%pe)\n",
@@ -466,27 +499,72 @@ int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node, u64 start, u64 end)
}
/**
- * xe_ggtt_node_remove_balloon - release a reserved GGTT region
+ * xe_ggtt_node_remove_balloon_locked - release a reserved GGTT region
* @node: the &xe_ggtt_node with reserved GGTT region
*
- * See xe_ggtt_node_insert_balloon() for details.
+ * To be used in cases where ggtt->lock is already taken.
+ * See xe_ggtt_node_insert_balloon_locked() for details.
*/
-void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node)
+void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node)
{
- if (!node || !node->ggtt)
+ if (!xe_ggtt_node_allocated(node))
return;
- if (!drm_mm_node_allocated(&node->base))
- goto free_node;
+ lockdep_assert_held(&node->ggtt->lock);
xe_ggtt_dump_node(node->ggtt, &node->base, "remove-balloon");
- mutex_lock(&node->ggtt->lock);
drm_mm_remove_node(&node->base);
- mutex_unlock(&node->ggtt->lock);
+}
-free_node:
- xe_ggtt_node_fini(node);
+static void xe_ggtt_assert_fit(struct xe_ggtt *ggtt, u64 start, u64 size)
+{
+ struct xe_tile *tile = ggtt->tile;
+ struct xe_device *xe = tile_to_xe(tile);
+ u64 __maybe_unused wopcm = xe_wopcm_size(xe);
+
+ xe_tile_assert(tile, start >= wopcm);
+ xe_tile_assert(tile, start + size < ggtt->size - wopcm);
+}
+
+/**
+ * xe_ggtt_shift_nodes_locked - Shift GGTT nodes to adjust for a change in usable address range.
+ * @ggtt: the &xe_ggtt struct instance
+ * @shift: change in the location of the area provisioned for the current VF
+ *
+ * This function moves all nodes from the GGTT VM to a temporary list. These nodes are
+ * expected to represent allocations in the range formerly assigned to the current VF,
+ * before that range changed. Once the GGTT VM is completely clear of nodes, they are
+ * re-added with shifted offsets.
+ *
+ * The function cannot fail, because it only shifts existing nodes without any additional
+ * processing. If the nodes could be placed at the old addresses, they will fit at the
+ * shifted ones as well. A failure here would mean the node list was already damaged, or
+ * that the shift moves the address range outside of valid bounds; both justify an assert.
+ */
+void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift)
+{
+ struct xe_tile *tile __maybe_unused = ggtt->tile;
+ struct drm_mm_node *node, *tmpn;
+ LIST_HEAD(temp_list_head);
+
+ lockdep_assert_held(&ggtt->lock);
+
+ if (IS_ENABLED(CONFIG_DRM_XE_DEBUG))
+ drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm)
+ xe_ggtt_assert_fit(ggtt, node->start + shift, node->size);
+
+ drm_mm_for_each_node_safe(node, tmpn, &ggtt->mm) {
+ drm_mm_remove_node(node);
+ list_add(&node->node_list, &temp_list_head);
+ }
+
+ list_for_each_entry_safe(node, tmpn, &temp_list_head, node_list) {
+ list_del(&node->node_list);
+ node->start += shift;
+ drm_mm_reserve_node(&ggtt->mm, node);
+ xe_tile_assert(tile, drm_mm_node_allocated(node));
+ }
}
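The expected caller is the VF post-migration fixup: it reads the shift the PF applied to the VF's range and then moves every node while holding the GGTT lock. A condensed sketch of that sequence; the real fixup code is not part of this hunk and also has to re-position the balloon nodes:

    static void example_fixup_after_migration(struct xe_tile *tile, struct xe_gt *gt)
    {
    	struct xe_ggtt *ggtt = tile->mem.ggtt;
    	s64 shift = xe_gt_sriov_vf_ggtt_shift(gt);

    	mutex_lock(&ggtt->lock);
    	/* balloons would be re-inserted around the new range here as well */
    	xe_ggtt_shift_nodes_locked(ggtt, shift);
    	mutex_unlock(&ggtt->lock);
    }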
/**
@@ -537,12 +615,12 @@ int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align)
* xe_ggtt_node_init - Initialize %xe_ggtt_node struct
* @ggtt: the &xe_ggtt where the new node will later be inserted/reserved.
*
- * This function will allocated the struct %xe_ggtt_node and return it's pointer.
+ * This function will allocate the struct %xe_ggtt_node and return its pointer.
* This struct will then be freed after the node removal upon xe_ggtt_node_remove()
- * or xe_ggtt_node_remove_balloon().
+ * or xe_ggtt_node_remove_balloon_locked().
* Having %xe_ggtt_node struct allocated doesn't mean that the node is already allocated
* in GGTT. Only the xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
- * xe_ggtt_node_insert_balloon() will ensure the node is inserted or reserved in GGTT.
+ * xe_ggtt_node_insert_balloon_locked() will ensure the node is inserted or reserved in GGTT.
*
* Return: A pointer to %xe_ggtt_node struct on success. An ERR_PTR otherwise.
**/
@@ -564,7 +642,7 @@ struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt)
* @node: the &xe_ggtt_node to be freed
*
* If anything went wrong with either xe_ggtt_node_insert(), xe_ggtt_node_insert_locked(),
- * or xe_ggtt_node_insert_balloon(); and this @node is not going to be reused, then,
+ * or xe_ggtt_node_insert_balloon_locked(); and this @node is not going to be reused, then,
* this function needs to be called to free the %xe_ggtt_node struct
**/
void xe_ggtt_node_fini(struct xe_ggtt_node *node)
@@ -589,26 +667,59 @@ bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node)
/**
* xe_ggtt_map_bo - Map the BO into GGTT
* @ggtt: the &xe_ggtt where node will be mapped
+ * @node: the &xe_ggtt_node where this BO is mapped
* @bo: the &xe_bo to be mapped
+ * @pat_index: Which pat_index to use.
*/
-void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
+void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
+ struct xe_bo *bo, u16 pat_index)
{
- u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
- u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
- u64 start;
- u64 offset, pte;
- if (XE_WARN_ON(!bo->ggtt_node[ggtt->tile->id]))
+ u64 start, pte, end;
+ struct xe_res_cursor cur;
+
+ if (XE_WARN_ON(!node))
return;
- start = bo->ggtt_node[ggtt->tile->id]->base.start;
+ start = node->base.start;
+ end = start + bo->size;
+
+ pte = ggtt->pt_ops->pte_encode_flags(bo, pat_index);
+ if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
+ xe_assert(xe_bo_device(bo), bo->ttm.ttm);
+
+ for (xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &cur);
+ cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
+ ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
+ pte | xe_res_dma(&cur));
+ } else {
+ /* Prepend GPU offset */
+ pte |= vram_region_gpu_offset(bo->ttm.resource);
- for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
- pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
- ggtt->pt_ops->ggtt_set_pte(ggtt, start + offset, pte);
+ for (xe_res_first(bo->ttm.resource, 0, bo->size, &cur);
+ cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE))
+ ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining,
+ pte + cur.start);
}
}
+/**
+ * xe_ggtt_map_bo_unlocked - Restore a mapping of a BO into GGTT
+ * @ggtt: the &xe_ggtt where node will be mapped
+ * @bo: the &xe_bo to be mapped
+ *
+ * This is used to restore a GGTT mapping after suspend.
+ */
+void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo)
+{
+ u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
+
+ mutex_lock(&ggtt->lock);
+ xe_ggtt_map_bo(ggtt, bo->ggtt_node[ggtt->tile->id], bo, pat_index);
+ mutex_unlock(&ggtt->lock);
+}
+
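Because xe_ggtt_map_bo_unlocked() takes the GGTT lock itself, a resume path can simply walk its pinned objects and rewrite their PTEs. Rough sketch only; the list and link member names are assumptions, not taken from this patch:

    static void example_restore_ggtt_mappings(struct xe_ggtt *ggtt,
    					      struct list_head *pinned_bos)
    {
    	struct xe_bo *bo;

    	list_for_each_entry(bo, pinned_bos, pinned_link)
    		xe_ggtt_map_bo_unlocked(ggtt, bo);
    }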
static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end)
{
@@ -645,7 +756,10 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
xe_ggtt_node_fini(bo->ggtt_node[tile_id]);
bo->ggtt_node[tile_id] = NULL;
} else {
- xe_ggtt_map_bo(ggtt, bo);
+ u16 cache_mode = bo->flags & XE_BO_FLAG_NEEDS_UC ? XE_CACHE_NONE : XE_CACHE_WB;
+ u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[cache_mode];
+
+ xe_ggtt_map_bo(ggtt, bo->ggtt_node[tile_id], bo, pat_index);
}
mutex_unlock(&ggtt->lock);
@@ -841,3 +955,30 @@ u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer
return total;
}
+
+/**
+ * xe_ggtt_encode_pte_flags - Get PTE encoding flags for BO
+ * @ggtt: &xe_ggtt
+ * @bo: &xe_bo
+ * @pat_index: The pat_index for the PTE.
+ *
+ * This function returns the PTE flags for a given BO, without the address bits.
+ * It's used by the DPT code to fill a GGTT-mapped BO with a linear lookup table.
+ */
+u64 xe_ggtt_encode_pte_flags(struct xe_ggtt *ggtt,
+ struct xe_bo *bo, u16 pat_index)
+{
+ return ggtt->pt_ops->pte_encode_flags(bo, pat_index);
+}
+
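For the DPT use case mentioned above, a caller only has to OR these flags with a per-page address before writing each entry. Illustrative sketch, not the actual display code:

    static void example_fill_dpt(struct xe_ggtt *ggtt, struct xe_bo *fb_bo,
    			     struct iosys_map *dpt, u16 pat_index)
    {
    	u64 flags = xe_ggtt_encode_pte_flags(ggtt, fb_bo, pat_index);
    	u64 offset;

    	for (offset = 0; offset < fb_bo->size; offset += XE_PAGE_SIZE)
    		iosys_map_wr(dpt, (offset / XE_PAGE_SIZE) * sizeof(u64), u64,
    			     flags | xe_bo_addr(fb_bo, offset, XE_PAGE_SIZE));
    }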
+/**
+ * xe_ggtt_read_pte - Read a PTE from the GGTT
+ * @ggtt: &xe_ggtt
+ * @offset: the offset for which the mapping should be read.
+ *
+ * Used by test cases, and by display code reading out an inherited BIOS framebuffer.
+ */
+u64 xe_ggtt_read_pte(struct xe_ggtt *ggtt, u64 offset)
+{
+ return ioread64(ggtt->gsm + (offset / XE_PAGE_SIZE));
+}
diff --git a/drivers/gpu/drm/xe/xe_ggtt.h b/drivers/gpu/drm/xe/xe_ggtt.h
index 27e7d67de004..fbe1e397d05d 100644
--- a/drivers/gpu/drm/xe/xe_ggtt.h
+++ b/drivers/gpu/drm/xe/xe_ggtt.h
@@ -9,22 +9,28 @@
#include "xe_ggtt_types.h"
struct drm_printer;
+struct xe_tile;
+struct xe_ggtt *xe_ggtt_alloc(struct xe_tile *tile);
int xe_ggtt_init_early(struct xe_ggtt *ggtt);
+int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size);
int xe_ggtt_init(struct xe_ggtt *ggtt);
struct xe_ggtt_node *xe_ggtt_node_init(struct xe_ggtt *ggtt);
void xe_ggtt_node_fini(struct xe_ggtt_node *node);
-int xe_ggtt_node_insert_balloon(struct xe_ggtt_node *node,
- u64 start, u64 size);
-void xe_ggtt_node_remove_balloon(struct xe_ggtt_node *node);
+int xe_ggtt_node_insert_balloon_locked(struct xe_ggtt_node *node,
+ u64 start, u64 size);
+void xe_ggtt_node_remove_balloon_locked(struct xe_ggtt_node *node);
+void xe_ggtt_shift_nodes_locked(struct xe_ggtt *ggtt, s64 shift);
int xe_ggtt_node_insert(struct xe_ggtt_node *node, u32 size, u32 align);
int xe_ggtt_node_insert_locked(struct xe_ggtt_node *node,
u32 size, u32 align, u32 mm_flags);
void xe_ggtt_node_remove(struct xe_ggtt_node *node, bool invalidate);
bool xe_ggtt_node_allocated(const struct xe_ggtt_node *node);
-void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
+void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node,
+ struct xe_bo *bo, u16 pat_index);
+void xe_ggtt_map_bo_unlocked(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo);
int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
u64 start, u64 end);
@@ -38,4 +44,14 @@ u64 xe_ggtt_print_holes(struct xe_ggtt *ggtt, u64 alignment, struct drm_printer
void xe_ggtt_assign(const struct xe_ggtt_node *node, u16 vfid);
#endif
+#ifndef CONFIG_LOCKDEP
+static inline void xe_ggtt_might_lock(struct xe_ggtt *ggtt)
+{ }
+#else
+void xe_ggtt_might_lock(struct xe_ggtt *ggtt);
+#endif
+
+u64 xe_ggtt_encode_pte_flags(struct xe_ggtt *ggtt, struct xe_bo *bo, u16 pat_index);
+u64 xe_ggtt_read_pte(struct xe_ggtt *ggtt, u64 offset);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_ggtt_types.h b/drivers/gpu/drm/xe/xe_ggtt_types.h
index cb02b7994a9a..c5e999d58ff2 100644
--- a/drivers/gpu/drm/xe/xe_ggtt_types.h
+++ b/drivers/gpu/drm/xe/xe_ggtt_types.h
@@ -74,8 +74,8 @@ struct xe_ggtt_node {
* Which can vary from platform to platform.
*/
struct xe_ggtt_pt_ops {
- /** @pte_encode_bo: Encode PTE address for a given BO */
- u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index);
+ /** @pte_encode_flags: Encode PTE flags for a given BO */
+ u64 (*pte_encode_flags)(struct xe_bo *bo, u16 pat_index);
/** @ggtt_set_pte: Directly write into GGTT's PTE */
void (*ggtt_set_pte)(struct xe_ggtt *ggtt, u64 addr, u64 pte);
};
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 0e5d243c9451..9752a38c0162 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -118,7 +118,7 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
}
- xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
+ xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF);
xe_force_wake_put(gt_to_fw(gt), fw_ref);
}
@@ -417,6 +417,8 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
+ xe_mocs_init_early(gt);
+
return 0;
}
@@ -634,8 +636,6 @@ int xe_gt_init(struct xe_gt *gt)
if (err)
return err;
- xe_mocs_init_early(gt);
-
err = xe_gt_sysfs_init(gt);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/xe_gt_debugfs.c b/drivers/gpu/drm/xe/xe_gt_debugfs.c
index 119a55bb7580..848618acdca8 100644
--- a/drivers/gpu/drm/xe/xe_gt_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_debugfs.c
@@ -122,24 +122,6 @@ static int powergate_info(struct xe_gt *gt, struct drm_printer *p)
return ret;
}
-static int force_reset(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_reset_async(gt);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
-static int force_reset_sync(struct xe_gt *gt, struct drm_printer *p)
-{
- xe_pm_runtime_get(gt_to_xe(gt));
- xe_gt_reset(gt);
- xe_pm_runtime_put(gt_to_xe(gt));
-
- return 0;
-}
-
static int sa_info(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_tile *tile = gt_to_tile(gt);
@@ -306,8 +288,6 @@ static int hwconfig(struct xe_gt *gt, struct drm_printer *p)
* - without access to the PF specific data
*/
static const struct drm_info_list vf_safe_debugfs_list[] = {
- {"force_reset", .show = xe_gt_debugfs_simple_show, .data = force_reset},
- {"force_reset_sync", .show = xe_gt_debugfs_simple_show, .data = force_reset_sync},
{"sa_info", .show = xe_gt_debugfs_simple_show, .data = sa_info},
{"topology", .show = xe_gt_debugfs_simple_show, .data = topology},
{"ggtt", .show = xe_gt_debugfs_simple_show, .data = ggtt},
@@ -332,6 +312,78 @@ static const struct drm_info_list pf_only_debugfs_list[] = {
{"steering", .show = xe_gt_debugfs_simple_show, .data = steering},
};
+static ssize_t write_to_gt_call(const char __user *userbuf, size_t count, loff_t *ppos,
+ void (*call)(struct xe_gt *), struct xe_gt *gt)
+{
+ bool yes;
+ int ret;
+
+ if (*ppos)
+ return -EINVAL;
+ ret = kstrtobool_from_user(userbuf, count, &yes);
+ if (ret < 0)
+ return ret;
+ if (yes)
+ call(gt);
+ return count;
+}
+
+static void force_reset(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_pm_runtime_get(xe);
+ xe_gt_reset_async(gt);
+ xe_pm_runtime_put(xe);
+}
+
+static ssize_t force_reset_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct xe_gt *gt = s->private;
+
+ return write_to_gt_call(userbuf, count, ppos, force_reset, gt);
+}
+
+static int force_reset_show(struct seq_file *s, void *unused)
+{
+ struct xe_gt *gt = s->private;
+
+ force_reset(gt); /* to be deprecated! */
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(force_reset);
+
+static void force_reset_sync(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_pm_runtime_get(xe);
+ xe_gt_reset(gt);
+ xe_pm_runtime_put(xe);
+}
+
+static ssize_t force_reset_sync_write(struct file *file,
+ const char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct xe_gt *gt = s->private;
+
+ return write_to_gt_call(userbuf, count, ppos, force_reset_sync, gt);
+}
+
+static int force_reset_sync_show(struct seq_file *s, void *unused)
+{
+ struct xe_gt *gt = s->private;
+
+ force_reset_sync(gt); /* to be deprecated! */
+ return 0;
+}
+DEFINE_SHOW_STORE_ATTRIBUTE(force_reset_sync);
+
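write_to_gt_call() deliberately takes a plain void (*)(struct xe_gt *) action, so further write-triggered GT debugfs knobs can be wired up the same way. Hypothetical example, not part of this patch:

    static void example_action(struct xe_gt *gt)
    {
    	xe_gt_tlb_invalidation_ggtt(gt);
    }

    static ssize_t example_action_write(struct file *file, const char __user *userbuf,
    				    size_t count, loff_t *ppos)
    {
    	struct seq_file *s = file->private_data;
    	struct xe_gt *gt = s->private;

    	return write_to_gt_call(userbuf, count, ppos, example_action, gt);
    }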
void xe_gt_debugfs_register(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -355,6 +407,10 @@ void xe_gt_debugfs_register(struct xe_gt *gt)
*/
root->d_inode->i_private = gt;
+ /* VF safe */
+ debugfs_create_file("force_reset", 0600, root, gt, &force_reset_fops);
+ debugfs_create_file("force_reset_sync", 0600, root, gt, &force_reset_sync_fops);
+
drm_debugfs_create_files(vf_safe_debugfs_list,
ARRAY_SIZE(vf_safe_debugfs_list),
root, minor);
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 10622ca471a2..3522865c67c9 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -14,6 +14,7 @@
#include "abi/guc_actions_abi.h"
#include "xe_bo.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_gt_stats.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
@@ -68,31 +69,8 @@ static bool access_is_atomic(enum access_type access_type)
static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
- return BIT(tile->id) & vma->tile_present &&
- !(BIT(tile->id) & vma->tile_invalidated);
-}
-
-static bool vma_matches(struct xe_vma *vma, u64 page_addr)
-{
- if (page_addr > xe_vma_end(vma) - 1 ||
- page_addr + SZ_4K - 1 < xe_vma_start(vma))
- return false;
-
- return true;
-}
-
-static struct xe_vma *lookup_vma(struct xe_vm *vm, u64 page_addr)
-{
- struct xe_vma *vma = NULL;
-
- if (vm->usm.last_fault_vma) { /* Fast lookup */
- if (vma_matches(vm->usm.last_fault_vma, page_addr))
- vma = vm->usm.last_fault_vma;
- }
- if (!vma)
- vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);
-
- return vma;
+ return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
+ vma->tile_invalidated);
}
static int xe_pf_begin(struct drm_exec *exec, struct xe_vma *vma,
@@ -143,7 +121,7 @@ static int handle_vma_pagefault(struct xe_gt *gt, struct xe_vma *vma,
trace_xe_vma_pagefault(vma);
- /* Check if VMA is valid */
+ /* Check if VMA is valid, opportunistic check only */
if (vma_is_valid(tile, vma) && !atomic)
return 0;
@@ -180,7 +158,6 @@ retry_userptr:
dma_fence_wait(fence, false);
dma_fence_put(fence);
- vma->tile_invalidated &= ~BIT(tile->id);
unlock_dma_resv:
drm_exec_fini(&exec);
@@ -231,7 +208,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
goto unlock_vm;
}
- vma = lookup_vma(vm, pf->page_addr);
+ vma = xe_vm_find_vma_by_addr(vm, pf->page_addr);
if (!vma) {
err = -EINVAL;
goto unlock_vm;
@@ -266,22 +243,22 @@ static int send_pagefault_reply(struct xe_guc *guc,
return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}
-static void print_pagefault(struct xe_device *xe, struct pagefault *pf)
+static void print_pagefault(struct xe_gt *gt, struct pagefault *pf)
{
- drm_dbg(&xe->drm, "\n\tASID: %d\n"
- "\tVFID: %d\n"
- "\tPDATA: 0x%04x\n"
- "\tFaulted Address: 0x%08x%08x\n"
- "\tFaultType: %d\n"
- "\tAccessType: %d\n"
- "\tFaultLevel: %d\n"
- "\tEngineClass: %d %s\n"
- "\tEngineInstance: %d\n",
- pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
- lower_32_bits(pf->page_addr),
- pf->fault_type, pf->access_type, pf->fault_level,
- pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class),
- pf->engine_instance);
+ xe_gt_dbg(gt, "\n\tASID: %d\n"
+ "\tVFID: %d\n"
+ "\tPDATA: 0x%04x\n"
+ "\tFaulted Address: 0x%08x%08x\n"
+ "\tFaultType: %d\n"
+ "\tAccessType: %d\n"
+ "\tFaultLevel: %d\n"
+ "\tEngineClass: %d %s\n"
+ "\tEngineInstance: %d\n",
+ pf->asid, pf->vfid, pf->pdata, upper_32_bits(pf->page_addr),
+ lower_32_bits(pf->page_addr),
+ pf->fault_type, pf->access_type, pf->fault_level,
+ pf->engine_class, xe_hw_engine_class_to_str(pf->engine_class),
+ pf->engine_instance);
}
#define PF_MSG_LEN_DW 4
@@ -333,7 +310,6 @@ static bool pf_queue_full(struct pf_queue *pf_queue)
int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
{
struct xe_gt *gt = guc_to_gt(guc);
- struct xe_device *xe = gt_to_xe(gt);
struct pf_queue *pf_queue;
unsigned long flags;
u32 asid;
@@ -358,7 +334,7 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
pf_queue->num_dw;
queue_work(gt->usm.pf_wq, &pf_queue->worker);
} else {
- drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
+ xe_gt_warn(gt, "PageFault Queue full, shouldn't be possible\n");
}
spin_unlock_irqrestore(&pf_queue->lock, flags);
@@ -371,7 +347,6 @@ static void pf_queue_work_func(struct work_struct *w)
{
struct pf_queue *pf_queue = container_of(w, struct pf_queue, worker);
struct xe_gt *gt = pf_queue->gt;
- struct xe_device *xe = gt_to_xe(gt);
struct xe_guc_pagefault_reply reply = {};
struct pagefault pf = {};
unsigned long threshold;
@@ -382,9 +357,9 @@ static void pf_queue_work_func(struct work_struct *w)
while (get_pagefault(pf_queue, &pf)) {
ret = handle_pagefault(gt, &pf);
if (unlikely(ret)) {
- print_pagefault(xe, &pf);
+ print_pagefault(gt, &pf);
pf.fault_unsuccessful = 1;
- drm_dbg(&xe->drm, "Fault response: Unsuccessful %d\n", ret);
+ xe_gt_dbg(gt, "Fault response: Unsuccessful %pe\n", ERR_PTR(ret));
}
reply.dw0 = FIELD_PREP(PFR_VALID, 1) |
@@ -537,21 +512,21 @@ static int sub_granularity_in_byte(int val)
return (granularity_in_byte(val) / 32);
}
-static void print_acc(struct xe_device *xe, struct acc *acc)
+static void print_acc(struct xe_gt *gt, struct acc *acc)
{
- drm_warn(&xe->drm, "Access counter request:\n"
- "\tType: %s\n"
- "\tASID: %d\n"
- "\tVFID: %d\n"
- "\tEngine: %d:%d\n"
- "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
- "\tSub_Granularity Vector: 0x%08x\n"
- "\tVA Range base: 0x%016llx\n",
- acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
- acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
- granularity_in_byte(acc->granularity) / SZ_1K,
- sub_granularity_in_byte(acc->granularity) / SZ_1K,
- acc->sub_granularity, acc->va_range_base);
+ xe_gt_warn(gt, "Access counter request:\n"
+ "\tType: %s\n"
+ "\tASID: %d\n"
+ "\tVFID: %d\n"
+ "\tEngine: %d:%d\n"
+ "\tGranularity: 0x%x KB Region/ %d KB sub-granularity\n"
+ "\tSub_Granularity Vector: 0x%08x\n"
+ "\tVA Range base: 0x%016llx\n",
+ acc->access_type ? "AC_NTFY_VAL" : "AC_TRIG_VAL",
+ acc->asid, acc->vfid, acc->engine_class, acc->engine_instance,
+ granularity_in_byte(acc->granularity) / SZ_1K,
+ sub_granularity_in_byte(acc->granularity) / SZ_1K,
+ acc->sub_granularity, acc->va_range_base);
}
static struct xe_vma *get_acc_vma(struct xe_vm *vm, struct acc *acc)
@@ -649,7 +624,6 @@ static void acc_queue_work_func(struct work_struct *w)
{
struct acc_queue *acc_queue = container_of(w, struct acc_queue, worker);
struct xe_gt *gt = acc_queue->gt;
- struct xe_device *xe = gt_to_xe(gt);
struct acc acc = {};
unsigned long threshold;
int ret;
@@ -659,8 +633,8 @@ static void acc_queue_work_func(struct work_struct *w)
while (get_acc(acc_queue, &acc)) {
ret = handle_acc(gt, &acc);
if (unlikely(ret)) {
- print_acc(xe, &acc);
- drm_warn(&xe->drm, "ACC: Unsuccessful %d\n", ret);
+ print_acc(gt, &acc);
+ xe_gt_warn(gt, "ACC: Unsuccessful %pe\n", ERR_PTR(ret));
}
if (time_after(jiffies, threshold) &&
@@ -705,7 +679,7 @@ int xe_guc_access_counter_notify_handler(struct xe_guc *guc, u32 *msg, u32 len)
acc_queue->head = (acc_queue->head + len) % ACC_QUEUE_NUM_DW;
queue_work(gt->usm.acc_wq, &acc_queue->worker);
} else {
- drm_warn(&gt_to_xe(gt)->drm, "ACC Queue full, dropping ACC");
+ xe_gt_warn(gt, "ACC Queue full, dropping ACC\n");
}
spin_unlock(&acc_queue->lock);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 2420a548cacc..3556c41c041b 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -1520,6 +1520,8 @@ int xe_gt_sriov_pf_config_set_lmem(struct xe_gt *gt, unsigned int vfid, u64 size
{
int err;
+ xe_gt_assert(gt, xe_device_has_lmtt(gt_to_xe(gt)));
+
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
if (vfid)
err = pf_provision_vf_lmem(gt, vfid, size);
@@ -1629,7 +1631,7 @@ int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid,
xe_gt_assert(gt, num_vfs);
xe_gt_assert(gt, !xe_gt_is_media_type(gt));
- if (!IS_DGFX(gt_to_xe(gt)))
+ if (!xe_device_has_lmtt(gt_to_xe(gt)))
return 0;
mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
@@ -2163,7 +2165,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid)
valid_all = valid_all && valid_ggtt;
valid_any = valid_any || (valid_ggtt && is_primary);
- if (IS_DGFX(xe)) {
+ if (xe_device_has_lmtt(xe)) {
bool valid_lmem = pf_get_vf_config_lmem(primary_gt, vfid);
valid_any = valid_any || (valid_lmem && is_primary);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
index 0fe47f41b63c..13970d5a2867 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c
@@ -308,7 +308,7 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne
if (!xe_gt_is_media_type(gt)) {
debugfs_create_file_unsafe(vfid ? "ggtt_quota" : "ggtt_spare",
0644, parent, parent, &ggtt_fops);
- if (IS_DGFX(gt_to_xe(gt)))
+ if (xe_device_has_lmtt(gt_to_xe(gt)))
debugfs_create_file_unsafe(vfid ? "lmem_quota" : "lmem_spare",
0644, parent, parent, &lmem_fops);
}
@@ -558,7 +558,7 @@ void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root)
drm_debugfs_create_files(pf_ggtt_info,
ARRAY_SIZE(pf_ggtt_info),
pfdentry, minor);
- if (IS_DGFX(gt_to_xe(gt)))
+ if (xe_device_has_lmtt(gt_to_xe(gt)))
drm_debugfs_create_files(pf_lmem_info,
ARRAY_SIZE(pf_lmem_info),
pfdentry, minor);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
index a439261bf4d7..9b2fc9db55b8 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c
@@ -82,17 +82,17 @@ int xe_gt_sriov_vf_reset(struct xe_gt *gt)
}
static int guc_action_match_version(struct xe_guc *guc,
- u32 wanted_branch, u32 wanted_major, u32 wanted_minor,
- u32 *branch, u32 *major, u32 *minor, u32 *patch)
+ struct xe_uc_fw_version *wanted,
+ struct xe_uc_fw_version *found)
{
u32 request[VF2GUC_MATCH_VERSION_REQUEST_MSG_LEN] = {
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
GUC_ACTION_VF2GUC_MATCH_VERSION),
- FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted_branch) |
- FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted_major) |
- FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted_minor),
+ FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_BRANCH, wanted->branch) |
+ FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MAJOR, wanted->major) |
+ FIELD_PREP(VF2GUC_MATCH_VERSION_REQUEST_MSG_1_MINOR, wanted->minor),
};
u32 response[GUC_MAX_MMIO_MSG_LEN];
int ret;
@@ -106,120 +106,138 @@ static int guc_action_match_version(struct xe_guc *guc,
if (unlikely(FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_0_MBZ, response[0])))
return -EPROTO;
- *branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]);
- *major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]);
- *minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]);
- *patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]);
+ memset(found, 0, sizeof(struct xe_uc_fw_version));
+ found->branch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_BRANCH, response[1]);
+ found->major = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MAJOR, response[1]);
+ found->minor = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_MINOR, response[1]);
+ found->patch = FIELD_GET(VF2GUC_MATCH_VERSION_RESPONSE_MSG_1_PATCH, response[1]);
return 0;
}
-static void vf_minimum_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
+static int guc_action_match_version_any(struct xe_guc *guc,
+ struct xe_uc_fw_version *found)
+{
+ struct xe_uc_fw_version wanted = {
+ .branch = GUC_VERSION_BRANCH_ANY,
+ .major = GUC_VERSION_MAJOR_ANY,
+ .minor = GUC_VERSION_MINOR_ANY,
+ .patch = 0
+ };
+
+ return guc_action_match_version(guc, &wanted, found);
+}
+
+static void vf_minimum_guc_version(struct xe_gt *gt, struct xe_uc_fw_version *ver)
{
struct xe_device *xe = gt_to_xe(gt);
+ memset(ver, 0, sizeof(struct xe_uc_fw_version));
+
switch (xe->info.platform) {
case XE_TIGERLAKE ... XE_PVC:
/* 1.1 this is current baseline for Xe driver */
- *branch = 0;
- *major = 1;
- *minor = 1;
+ ver->branch = 0;
+ ver->major = 1;
+ ver->minor = 1;
break;
default:
/* 1.2 has support for the GMD_ID KLV */
- *branch = 0;
- *major = 1;
- *minor = 2;
+ ver->branch = 0;
+ ver->major = 1;
+ ver->minor = 2;
break;
}
}
-static void vf_wanted_guc_version(struct xe_gt *gt, u32 *branch, u32 *major, u32 *minor)
+static void vf_wanted_guc_version(struct xe_gt *gt, struct xe_uc_fw_version *ver)
{
/* for now it's the same as minimum */
- return vf_minimum_guc_version(gt, branch, major, minor);
+ return vf_minimum_guc_version(gt, ver);
}
static int vf_handshake_with_guc(struct xe_gt *gt)
{
- struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
+ struct xe_uc_fw_version *guc_version = &gt->sriov.vf.guc_version;
+ struct xe_uc_fw_version wanted = {0};
struct xe_guc *guc = &gt->uc.guc;
- u32 wanted_branch, wanted_major, wanted_minor;
- u32 branch, major, minor, patch;
+ bool old = false;
int err;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
/* select wanted version - prefer previous (if any) */
if (guc_version->major || guc_version->minor) {
- wanted_branch = guc_version->branch;
- wanted_major = guc_version->major;
- wanted_minor = guc_version->minor;
+ wanted = *guc_version;
+ old = true;
} else {
- vf_wanted_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
- xe_gt_assert(gt, wanted_major != GUC_VERSION_MAJOR_ANY);
+ vf_wanted_guc_version(gt, &wanted);
+ xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY);
+
+ /* First time we handshake, so record the minimum wanted */
+ gt->sriov.vf.wanted_guc_version = wanted;
}
- err = guc_action_match_version(guc, wanted_branch, wanted_major, wanted_minor,
- &branch, &major, &minor, &patch);
+ err = guc_action_match_version(guc, &wanted, guc_version);
if (unlikely(err))
goto fail;
- /* we don't support interface version change */
- if ((guc_version->major || guc_version->minor) &&
- (guc_version->branch != branch || guc_version->major != major ||
- guc_version->minor != minor)) {
- xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n",
- branch, major, minor, patch);
- xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n",
- guc_version->branch, guc_version->major,
- guc_version->minor, guc_version->patch);
- err = -EREMCHG;
- goto fail;
+ if (old) {
+ /* we don't support interface version change */
+ if (MAKE_GUC_VER_STRUCT(*guc_version) != MAKE_GUC_VER_STRUCT(wanted)) {
+ xe_gt_sriov_err(gt, "New GuC interface version detected: %u.%u.%u.%u\n",
+ guc_version->branch, guc_version->major,
+ guc_version->minor, guc_version->patch);
+ xe_gt_sriov_info(gt, "Previously used version was: %u.%u.%u.%u\n",
+ wanted.branch, wanted.major,
+ wanted.minor, wanted.patch);
+ err = -EREMCHG;
+ goto fail;
+ } else {
+ /* version is unchanged, no need to re-verify it */
+ return 0;
+ }
}
/* illegal */
- if (major > wanted_major) {
+ if (guc_version->major > wanted.major) {
err = -EPROTO;
goto unsupported;
}
/* there's no fallback on major version. */
- if (major != wanted_major) {
+ if (guc_version->major != wanted.major) {
err = -ENOPKG;
goto unsupported;
}
/* check against minimum version supported by us */
- vf_minimum_guc_version(gt, &wanted_branch, &wanted_major, &wanted_minor);
- xe_gt_assert(gt, major != GUC_VERSION_MAJOR_ANY);
- if (major < wanted_major || (major == wanted_major && minor < wanted_minor)) {
+ vf_minimum_guc_version(gt, &wanted);
+ xe_gt_assert(gt, wanted.major != GUC_VERSION_MAJOR_ANY);
+ if (MAKE_GUC_VER_STRUCT(*guc_version) < MAKE_GUC_VER_STRUCT(wanted)) {
err = -ENOKEY;
goto unsupported;
}
xe_gt_sriov_dbg(gt, "using GuC interface version %u.%u.%u.%u\n",
- branch, major, minor, patch);
+ guc_version->branch, guc_version->major,
+ guc_version->minor, guc_version->patch);
- guc_version->branch = branch;
- guc_version->major = major;
- guc_version->minor = minor;
- guc_version->patch = patch;
return 0;
unsupported:
xe_gt_sriov_err(gt, "Unsupported GuC version %u.%u.%u.%u (%pe)\n",
- branch, major, minor, patch, ERR_PTR(err));
+ guc_version->branch, guc_version->major,
+ guc_version->minor, guc_version->patch,
+ ERR_PTR(err));
fail:
xe_gt_sriov_err(gt, "Unable to confirm GuC version %u.%u (%pe)\n",
- wanted_major, wanted_minor, ERR_PTR(err));
+ wanted.major, wanted.minor, ERR_PTR(err));
/* try again with *any* just to query which version is supported */
- if (!guc_action_match_version(guc, GUC_VERSION_BRANCH_ANY,
- GUC_VERSION_MAJOR_ANY, GUC_VERSION_MINOR_ANY,
- &branch, &major, &minor, &patch))
+ if (!guc_action_match_version_any(guc, &wanted))
xe_gt_sriov_notice(gt, "GuC reports interface version %u.%u.%u.%u\n",
- branch, major, minor, patch);
+ wanted.branch, wanted.major, wanted.minor, wanted.patch);
return err;
}
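The MAKE_GUC_VER_STRUCT() comparisons above work because the macro packs major/minor/patch into a single integer, so a plain < or != behaves like a lexicographic version compare. Paraphrased from xe_guc.h for context; the exact definition is not part of this hunk and may differ slightly:

    #define MAKE_GUC_VER(maj, min, pat)	(((maj) << 16) | ((min) << 8) | (pat))
    #define MAKE_GUC_VER_STRUCT(ver)	MAKE_GUC_VER((ver).major, (ver).minor, (ver).patch)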
@@ -250,6 +268,29 @@ int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt)
return 0;
}
+/**
+ * xe_gt_sriov_vf_guc_versions - Minimum required and found GuC ABI versions
+ * @gt: the &xe_gt
+ * @wanted: pointer to the xe_uc_fw_version to be filled with the wanted version
+ * @found: pointer to the xe_uc_fw_version to be filled with the found version
+ *
+ * This function is for VF use only and it can only be used after successful
+ * version handshake with the GuC.
+ */
+void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
+ struct xe_uc_fw_version *wanted,
+ struct xe_uc_fw_version *found)
+{
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
+
+ if (wanted)
+ *wanted = gt->sriov.vf.wanted_guc_version;
+
+ if (found)
+ *found = gt->sriov.vf.guc_version;
+}
+
static int guc_action_vf_notify_resfix_done(struct xe_guc *guc)
{
u32 request[GUC_HXG_REQUEST_MSG_MIN_LEN] = {
@@ -415,6 +456,7 @@ static int vf_get_ggtt_info(struct xe_gt *gt)
xe_gt_sriov_dbg_verbose(gt, "GGTT %#llx-%#llx = %lluK\n",
start, start + size - 1, size / SZ_1K);
+ config->ggtt_shift = start - (s64)config->ggtt_base;
config->ggtt_base = start;
config->ggtt_size = size;
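A worked example with made-up numbers: if the PF previously provisioned this VF at GGTT base 0x01000000 and after migration reports base 0x03000000, ggtt_shift becomes +0x02000000, and xe_ggtt_shift_nodes_locked() will later add exactly that amount to the start of every node the VF had allocated. Note that the shift is computed before ggtt_base is overwritten with the new value.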
@@ -560,106 +602,56 @@ u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt)
return gt->sriov.vf.self_config.lmem_size;
}
-static struct xe_ggtt_node *
-vf_balloon_ggtt_node(struct xe_ggtt *ggtt, u64 start, u64 end)
-{
- struct xe_ggtt_node *node;
- int err;
-
- node = xe_ggtt_node_init(ggtt);
- if (IS_ERR(node))
- return node;
-
- err = xe_ggtt_node_insert_balloon(node, start, end);
- if (err) {
- xe_ggtt_node_fini(node);
- return ERR_PTR(err);
- }
-
- return node;
-}
-
-static int vf_balloon_ggtt(struct xe_gt *gt)
+/**
+ * xe_gt_sriov_vf_ggtt - VF GGTT configuration.
+ * @gt: the &xe_gt
+ *
+ * This function is for VF use only.
+ *
+ * Return: size of the GGTT assigned to VF.
+ */
+u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt)
{
- struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_ggtt *ggtt = tile->mem.ggtt;
- struct xe_device *xe = gt_to_xe(gt);
- u64 start, end;
-
- xe_gt_assert(gt, IS_SRIOV_VF(xe));
- xe_gt_assert(gt, !xe_gt_is_media_type(gt));
-
- if (!config->ggtt_size)
- return -ENODATA;
-
- /*
- * VF can only use part of the GGTT as allocated by the PF:
- *
- * WOPCM GUC_GGTT_TOP
- * |<------------ Total GGTT size ------------------>|
- *
- * VF GGTT base -->|<- size ->|
- *
- * +--------------------+----------+-----------------+
- * |////////////////////| block |\\\\\\\\\\\\\\\\\|
- * +--------------------+----------+-----------------+
- *
- * |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->|
- */
-
- start = xe_wopcm_size(xe);
- end = config->ggtt_base;
- if (end != start) {
- tile->sriov.vf.ggtt_balloon[0] = vf_balloon_ggtt_node(ggtt, start, end);
- if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
- return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);
- }
-
- start = config->ggtt_base + config->ggtt_size;
- end = GUC_GGTT_TOP;
- if (end != start) {
- tile->sriov.vf.ggtt_balloon[1] = vf_balloon_ggtt_node(ggtt, start, end);
- if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
- xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
- return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
- }
- }
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
+ xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
- return 0;
+ return gt->sriov.vf.self_config.ggtt_size;
}
-static void deballoon_ggtt(struct drm_device *drm, void *arg)
+/**
+ * xe_gt_sriov_vf_ggtt_base - VF GGTT base offset.
+ * @gt: the &xe_gt
+ *
+ * This function is for VF use only.
+ *
+ * Return: base offset of the GGTT assigned to VF.
+ */
+u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt)
{
- struct xe_tile *tile = arg;
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, gt->sriov.vf.guc_version.major);
+ xe_gt_assert(gt, gt->sriov.vf.self_config.ggtt_size);
- xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
- xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[1]);
- xe_ggtt_node_remove_balloon(tile->sriov.vf.ggtt_balloon[0]);
+ return gt->sriov.vf.self_config.ggtt_base;
}
/**
- * xe_gt_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration.
- * @gt: the &xe_gt
+ * xe_gt_sriov_vf_ggtt_shift - Return shift in GGTT range due to VF migration
+ * @gt: the &xe_gt struct instance
*
* This function is for VF use only.
*
- * Return: 0 on success or a negative error code on failure.
+ * Return: The shift value; could be negative
*/
-int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt)
+s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt)
{
- struct xe_tile *tile = gt_to_tile(gt);
- struct xe_device *xe = tile_to_xe(tile);
- int err;
-
- if (xe_gt_is_media_type(gt))
- return 0;
+ struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config;
- err = vf_balloon_ggtt(gt);
- if (err)
- return err;
+ xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
+ xe_gt_assert(gt, !xe_gt_is_media_type(gt));
- return drmm_add_action_or_reset(&xe->drm, deballoon_ggtt, tile);
+ return config->ggtt_shift;
}
static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor)
@@ -1043,6 +1035,8 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p)
string_get_size(config->ggtt_size, 1, STRING_UNITS_2, buf, sizeof(buf));
drm_printf(p, "GGTT size:\t%llu (%s)\n", config->ggtt_size, buf);
+ drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift);
+
if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) {
string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf));
drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf);
@@ -1079,19 +1073,20 @@ void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p)
*/
void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p)
{
- struct xe_gt_sriov_vf_guc_version *guc_version = &gt->sriov.vf.guc_version;
+ struct xe_uc_fw_version *guc_version = &gt->sriov.vf.guc_version;
+ struct xe_uc_fw_version *wanted = &gt->sriov.vf.wanted_guc_version;
struct xe_gt_sriov_vf_relay_version *pf_version = &gt->sriov.vf.pf_version;
- u32 branch, major, minor;
+ struct xe_uc_fw_version ver;
xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt)));
drm_printf(p, "GuC ABI:\n");
- vf_minimum_guc_version(gt, &branch, &major, &minor);
- drm_printf(p, "\tbase:\t%u.%u.%u.*\n", branch, major, minor);
+ vf_minimum_guc_version(gt, &ver);
+ drm_printf(p, "\tbase:\t%u.%u.%u.*\n", ver.branch, ver.major, ver.minor);
- vf_wanted_guc_version(gt, &branch, &major, &minor);
- drm_printf(p, "\twanted:\t%u.%u.%u.*\n", branch, major, minor);
+ drm_printf(p, "\twanted:\t%u.%u.%u.*\n",
+ wanted->branch, wanted->major, wanted->minor);
drm_printf(p, "\thandshake:\t%u.%u.%u.%u\n",
guc_version->branch, guc_version->major,
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
index ba6c5d74e326..e0357f341a2d 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.h
@@ -11,19 +11,26 @@
struct drm_printer;
struct xe_gt;
struct xe_reg;
+struct xe_uc_fw_version;
int xe_gt_sriov_vf_reset(struct xe_gt *gt);
int xe_gt_sriov_vf_bootstrap(struct xe_gt *gt);
+void xe_gt_sriov_vf_guc_versions(struct xe_gt *gt,
+ struct xe_uc_fw_version *wanted,
+ struct xe_uc_fw_version *found);
int xe_gt_sriov_vf_query_config(struct xe_gt *gt);
int xe_gt_sriov_vf_connect(struct xe_gt *gt);
int xe_gt_sriov_vf_query_runtime(struct xe_gt *gt);
-int xe_gt_sriov_vf_prepare_ggtt(struct xe_gt *gt);
int xe_gt_sriov_vf_notify_resfix_done(struct xe_gt *gt);
void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt);
u32 xe_gt_sriov_vf_gmdid(struct xe_gt *gt);
u16 xe_gt_sriov_vf_guc_ids(struct xe_gt *gt);
u64 xe_gt_sriov_vf_lmem(struct xe_gt *gt);
+u64 xe_gt_sriov_vf_ggtt(struct xe_gt *gt);
+u64 xe_gt_sriov_vf_ggtt_base(struct xe_gt *gt);
+s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt);
+
u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg);
void xe_gt_sriov_vf_write32(struct xe_gt *gt, struct xe_reg reg, u32 val);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
index a57f13b5afcd..ef041679e9d4 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h
@@ -7,20 +7,7 @@
#define _XE_GT_SRIOV_VF_TYPES_H_
#include <linux/types.h>
-
-/**
- * struct xe_gt_sriov_vf_guc_version - GuC ABI version details.
- */
-struct xe_gt_sriov_vf_guc_version {
- /** @branch: branch version. */
- u8 branch;
- /** @major: major version. */
- u8 major;
- /** @minor: minor version. */
- u8 minor;
- /** @patch: patch version. */
- u8 patch;
-};
+#include "xe_uc_fw_types.h"
/**
* struct xe_gt_sriov_vf_relay_version - PF ABI version details.
@@ -40,6 +27,8 @@ struct xe_gt_sriov_vf_selfconfig {
u64 ggtt_base;
/** @ggtt_size: assigned size of the GGTT region. */
u64 ggtt_size;
+ /** @ggtt_shift: difference in ggtt_base on last migration */
+ s64 ggtt_shift;
/** @lmem_size: assigned size of the LMEM. */
u64 lmem_size;
/** @num_ctxs: assigned number of GuC submission context IDs. */
@@ -71,8 +60,10 @@ struct xe_gt_sriov_vf_runtime {
* struct xe_gt_sriov_vf - GT level VF virtualization data.
*/
struct xe_gt_sriov_vf {
+ /** @wanted_guc_version: minimum wanted GuC ABI version. */
+ struct xe_uc_fw_version wanted_guc_version;
/** @guc_version: negotiated GuC ABI version. */
- struct xe_gt_sriov_vf_guc_version guc_version;
+ struct xe_uc_fw_version guc_version;
/** @self_config: resource configurations. */
struct xe_gt_sriov_vf_selfconfig self_config;
/** @pf_version: negotiated VF/PF ABI version. */
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
index 084cbdeba8ea..6088df8e159c 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c
@@ -138,6 +138,14 @@ void xe_gt_tlb_invalidation_reset(struct xe_gt *gt)
int pending_seqno;
/*
+ * we can get here before the CTs are even initialized if we're wedging
+ * very early, in which case there are not going to be any pending
+ * fences so we can bail immediately.
+ */
+ if (!xe_guc_ct_initialized(&gt->uc.guc.ct))
+ return;
+
+ /*
* CT channel is already disabled at this point. No new TLB requests can
* appear.
*/
@@ -441,30 +449,6 @@ void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm)
}
/**
- * xe_gt_tlb_invalidation_vma - Issue a TLB invalidation on this GT for a VMA
- * @gt: GT structure
- * @fence: invalidation fence which will be signal on TLB invalidation
- * completion, can be NULL
- * @vma: VMA to invalidate
- *
- * Issue a range based TLB invalidation if supported, if not fallback to a full
- * TLB invalidation. Completion of TLB is asynchronous and caller can use
- * the invalidation fence to wait for completion.
- *
- * Return: Negative error code on error, 0 on success
- */
-int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- struct xe_vma *vma)
-{
- xe_gt_assert(gt, vma);
-
- return xe_gt_tlb_invalidation_range(gt, fence, xe_vma_start(vma),
- xe_vma_end(vma),
- xe_vma_vm(vma)->usm.asid);
-}
-
-/**
* xe_guc_tlb_invalidation_done_handler - TLB invalidation done handler
* @guc: guc
* @msg: message indicating TLB invalidation done
diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
index abe9b03d543e..31072dbcad8e 100644
--- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
+++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h
@@ -19,9 +19,6 @@ int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt);
void xe_gt_tlb_invalidation_reset(struct xe_gt *gt);
int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt);
-int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
- struct xe_gt_tlb_invalidation_fence *fence,
- struct xe_vma *vma);
void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm);
int xe_gt_tlb_invalidation_range(struct xe_gt *gt,
struct xe_gt_tlb_invalidation_fence *fence,
diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c
index 516c81e3b8dd..305939c69747 100644
--- a/drivers/gpu/drm/xe/xe_gt_topology.c
+++ b/drivers/gpu/drm/xe/xe_gt_topology.c
@@ -12,23 +12,20 @@
#include "regs/xe_gt_regs.h"
#include "xe_assert.h"
#include "xe_gt.h"
+#include "xe_gt_printk.h"
#include "xe_mmio.h"
#include "xe_wa.h"
-static void
-load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
+static void load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs,
+ const struct xe_reg regs[])
{
- va_list argp;
u32 fuse_val[XE_MAX_DSS_FUSE_REGS] = {};
int i;
- if (drm_WARN_ON(&gt_to_xe(gt)->drm, numregs > XE_MAX_DSS_FUSE_REGS))
- numregs = XE_MAX_DSS_FUSE_REGS;
+ xe_gt_assert(gt, numregs <= ARRAY_SIZE(fuse_val));
- va_start(argp, numregs);
for (i = 0; i < numregs; i++)
- fuse_val[i] = xe_mmio_read32(&gt->mmio, va_arg(argp, struct xe_reg));
- va_end(argp);
+ fuse_val[i] = xe_mmio_read32(&gt->mmio, regs[i]);
bitmap_from_arr32(mask, fuse_val, numregs * 32);
}
@@ -218,9 +215,19 @@ get_num_dss_regs(struct xe_device *xe, int *geometry_regs, int *compute_regs)
void
xe_gt_topology_init(struct xe_gt *gt)
{
+ static const struct xe_reg geometry_regs[] = {
+ XELP_GT_GEOMETRY_DSS_ENABLE,
+ XE2_GT_GEOMETRY_DSS_1,
+ XE2_GT_GEOMETRY_DSS_2,
+ };
+ static const struct xe_reg compute_regs[] = {
+ XEHP_GT_COMPUTE_DSS_ENABLE,
+ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
+ XE2_GT_COMPUTE_DSS_2,
+ };
+ int num_geometry_regs, num_compute_regs;
struct xe_device *xe = gt_to_xe(gt);
struct drm_printer p;
- int num_geometry_regs, num_compute_regs;
get_num_dss_regs(xe, &num_geometry_regs, &num_compute_regs);
@@ -228,23 +235,18 @@ xe_gt_topology_init(struct xe_gt *gt)
* Register counts returned shouldn't exceed the number of registers
* passed as parameters below.
*/
- drm_WARN_ON(&xe->drm, num_geometry_regs > 3);
- drm_WARN_ON(&xe->drm, num_compute_regs > 3);
+ xe_gt_assert(gt, num_geometry_regs <= ARRAY_SIZE(geometry_regs));
+ xe_gt_assert(gt, num_compute_regs <= ARRAY_SIZE(compute_regs));
load_dss_mask(gt, gt->fuse_topo.g_dss_mask,
- num_geometry_regs,
- XELP_GT_GEOMETRY_DSS_ENABLE,
- XE2_GT_GEOMETRY_DSS_1,
- XE2_GT_GEOMETRY_DSS_2);
- load_dss_mask(gt, gt->fuse_topo.c_dss_mask, num_compute_regs,
- XEHP_GT_COMPUTE_DSS_ENABLE,
- XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,
- XE2_GT_COMPUTE_DSS_2);
+ num_geometry_regs, geometry_regs);
+ load_dss_mask(gt, gt->fuse_topo.c_dss_mask,
+ num_compute_regs, compute_regs);
+
load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss, &gt->fuse_topo.eu_type);
load_l3_bank_mask(gt, gt->fuse_topo.l3_bank_mask);
- p = drm_dbg_printer(&gt_to_xe(gt)->drm, DRM_UT_DRIVER, "GT topology");
-
+ p = xe_gt_dbg_printer(gt);
xe_gt_topology_dump(gt, &p);
}
diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c
index bac5471a1a78..209e5d53c290 100644
--- a/drivers/gpu/drm/xe/xe_guc.c
+++ b/drivers/gpu/drm/xe/xe_guc.c
@@ -710,6 +710,10 @@ static int vf_guc_init_post_hwconfig(struct xe_guc *guc)
if (err)
return err;
+ err = xe_guc_buf_cache_init(&guc->buf);
+ if (err)
+ return err;
+
/* XXX xe_guc_db_mgr_init not needed for now */
return 0;
@@ -1098,14 +1102,6 @@ static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc)
struct xe_gt *gt = guc_to_gt(guc);
int ret;
- ret = xe_gt_sriov_vf_bootstrap(gt);
- if (ret)
- return ret;
-
- ret = xe_gt_sriov_vf_query_config(gt);
- if (ret)
- return ret;
-
ret = xe_guc_hwconfig_init(guc);
if (ret)
return ret;
@@ -1285,6 +1281,7 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
const u32 LAST_INDEX = VF_SW_FLAG_COUNT - 1;
+ bool lost = false;
int ret;
int i;
@@ -1318,6 +1315,12 @@ retry:
FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_GUC),
50000, &reply, false);
if (ret) {
+ /* scratch registers might be cleared during FLR, try once more */
+ if (!reply && !lost) {
+ xe_gt_dbg(gt, "GuC mmio request %#x: lost, trying again\n", request[0]);
+ lost = true;
+ goto retry;
+ }
timeout:
xe_gt_err(gt, "GuC mmio request %#x: no reply %#x\n",
request[0], reply);
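
The hunk above adds a single retry to the VF's MMIO-based GuC requests: an empty reply can mean the scratch registers were cleared by an FLR, so the request is resent exactly once before the failure path runs. A generic sketch of that retry-once pattern; example_ctx, example_send_once() and the stand-in error are invented for illustration:

#include <linux/errno.h>
#include <linux/types.h>

struct example_ctx {
	u32 reply;
	int fail_count;		/* pretend transport state */
};

/* Stand-in for the real MMIO transaction. */
static int example_send_once(struct example_ctx *ctx)
{
	if (ctx->fail_count-- > 0) {
		ctx->reply = 0;
		return -ETIMEDOUT;
	}
	ctx->reply = 1;
	return 0;
}

static int example_send_with_retry(struct example_ctx *ctx)
{
	bool lost = false;
	int ret;

retry:
	ret = example_send_once(ctx);
	if (ret && !ctx->reply && !lost) {
		lost = true;	/* retry only once */
		goto retry;
	}
	return ret;
}
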
diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c
index 44c1fa2fe7c8..07a027755627 100644
--- a/drivers/gpu/drm/xe/xe_guc_ads.c
+++ b/drivers/gpu/drm/xe/xe_guc_ads.c
@@ -20,6 +20,7 @@
#include "xe_gt_ccs_mode.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
+#include "xe_guc_buf.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_hw_engine.h"
@@ -994,6 +995,16 @@ static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_off
return xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
}
+static int guc_ads_update_policies(struct xe_guc_ads *ads, const struct guc_policies *policies)
+{
+ CLASS(xe_guc_buf_from_data, buf)(&ads_to_guc(ads)->buf, policies, sizeof(*policies));
+
+ if (!xe_guc_buf_is_valid(buf))
+ return -ENOBUFS;
+
+ return guc_ads_action_update_policies(ads, xe_guc_buf_flush(buf));
+}
+
/**
* xe_guc_ads_scheduler_policy_toggle_reset - Toggle reset policy
* @ads: Additional data structures object
@@ -1005,11 +1016,8 @@ static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_off
int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
{
struct xe_device *xe = ads_to_xe(ads);
- struct xe_gt *gt = ads_to_gt(ads);
- struct xe_tile *tile = gt_to_tile(gt);
struct guc_policies *policies;
- struct xe_bo *bo;
- int ret = 0;
+ int ret;
policies = kmalloc(sizeof(*policies), GFP_KERNEL);
if (!policies)
@@ -1023,16 +1031,7 @@ int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads)
else
policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET;
- bo = xe_managed_bo_create_from_data(xe, tile, policies, sizeof(struct guc_policies),
- XE_BO_FLAG_VRAM_IF_DGFX(tile) |
- XE_BO_FLAG_GGTT);
- if (IS_ERR(bo)) {
- ret = PTR_ERR(bo);
- goto out;
- }
-
- ret = guc_ads_action_update_policies(ads, xe_bo_ggtt_addr(bo));
-out:
+ ret = guc_ads_update_policies(ads, policies);
kfree(policies);
return ret;
}
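
guc_ads_update_policies() above relies on the scope-based cleanup helpers from <linux/cleanup.h>: CLASS(xe_guc_buf_from_data, buf)(...) constructs a temporary GuC buffer whose destructor runs automatically when buf goes out of scope, which is what lets the error handling drop the old goto/out labels. A hedged sketch of the same pattern with a hypothetical example_buf class (the real xe_guc_buf_from_data class is presumably declared with the same DEFINE_CLASS() machinery):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical class: a kmalloc'ed buffer freed when it leaves scope. */
DEFINE_CLASS(example_buf, void *, kfree(_T), kmalloc(size, GFP_KERNEL), size_t size);

static int example_use_buf(void)
{
	CLASS(example_buf, buf)(SZ_4K);	/* freed automatically on return */

	if (!buf)
		return -ENOMEM;

	memset(buf, 0, SZ_4K);
	return 0;
}
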
diff --git a/drivers/gpu/drm/xe/xe_guc_buf.c b/drivers/gpu/drm/xe/xe_guc_buf.c
index 0193c94dd6a0..14a07dca48e7 100644
--- a/drivers/gpu/drm/xe/xe_guc_buf.c
+++ b/drivers/gpu/drm/xe/xe_guc_buf.c
@@ -37,10 +37,6 @@ int xe_guc_buf_cache_init(struct xe_guc_buf_cache *cache)
struct xe_gt *gt = cache_to_gt(cache);
struct xe_sa_manager *sam;
- /* XXX: currently it's useful only for the PF actions */
- if (!IS_SRIOV_PF(gt_to_xe(gt)))
- return 0;
-
sam = __xe_sa_bo_manager_init(gt_to_tile(gt), SZ_8K, 0, sizeof(u32));
if (IS_ERR(sam))
return PTR_ERR(sam);
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 2447de0ebedf..37509f619503 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -25,6 +25,7 @@
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_monitor.h"
+#include "xe_gt_sriov_printk.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_log.h"
@@ -84,6 +85,8 @@ struct g2h_fence {
bool done;
};
+#define make_u64(hi, lo) ((u64)((u64)(u32)(hi) << 32 | (u32)(lo)))
+
static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{
g2h_fence->response_buffer = response_buffer;
@@ -514,6 +517,9 @@ void xe_guc_ct_disable(struct xe_guc_ct *ct)
*/
void xe_guc_ct_stop(struct xe_guc_ct *ct)
{
+ if (!xe_guc_ct_initialized(ct))
+ return;
+
xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
stop_g2h_handler(ct);
}
@@ -625,6 +631,47 @@ static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
spin_unlock_irq(&ct->fast_lock);
}
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
+{
+ unsigned int slot = fence % ARRAY_SIZE(ct->fast_req);
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ unsigned long entries[SZ_32];
+ unsigned int n;
+
+ n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
+
+ /* May be called under spinlock, so avoid sleeping */
+ ct->fast_req[slot].stack = stack_depot_save(entries, n, GFP_NOWAIT);
+#endif
+ ct->fast_req[slot].fence = fence;
+ ct->fast_req[slot].action = action;
+}
+#else
+static void fast_req_track(struct xe_guc_ct *ct, u16 fence, u16 action)
+{
+}
+#endif
+
+/*
+ * The CT protocol accepts a 16 bits fence. This field is fully owned by the
+ * driver, the GuC will just copy it to the reply message. Since we need to
+ * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
+ * we use one bit of the seqno as an indicator for that and a rolling counter
+ * for the remaining 15 bits.
+ */
+#define CT_SEQNO_MASK GENMASK(14, 0)
+#define CT_SEQNO_UNTRACKED BIT(15)
+static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
+{
+ u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
+
+ if (!is_g2h_fence)
+ seqno |= CT_SEQNO_UNTRACKED;
+
+ return seqno;
+}
+
#define H2G_CT_HEADERS (GUC_CTB_HDR_LEN + 1) /* one DW CTB header and one DW HxG header */
static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
@@ -701,6 +748,9 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
} else {
+ fast_req_track(ct, ct_fence_value,
+ FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, action[0]));
+
cmd[1] =
FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_FAST_REQUEST) |
FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
@@ -733,25 +783,6 @@ corrupted:
return -EPIPE;
}
-/*
- * The CT protocol accepts a 16 bits fence. This field is fully owned by the
- * driver, the GuC will just copy it to the reply message. Since we need to
- * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
- * we use one bit of the seqno as an indicator for that and a rolling counter
- * for the remaining 15 bits.
- */
-#define CT_SEQNO_MASK GENMASK(14, 0)
-#define CT_SEQNO_UNTRACKED BIT(15)
-static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
-{
- u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
-
- if (!is_g2h_fence)
- seqno |= CT_SEQNO_UNTRACKED;
-
- return seqno;
-}
-
static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u32 len, u32 g2h_len, u32 num_g2h,
struct g2h_fence *g2h_fence)
@@ -760,7 +791,7 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
u16 seqno;
int ret;
- xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, xe_guc_ct_initialized(ct));
xe_gt_assert(gt, !g2h_len || !g2h_fence);
xe_gt_assert(gt, !num_g2h || !g2h_fence);
xe_gt_assert(gt, !g2h_len || num_g2h);
@@ -1143,6 +1174,55 @@ static int guc_crash_process_msg(struct xe_guc_ct *ct, u32 action)
return 0;
}
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
+{
+ u16 fence_min = U16_MAX, fence_max = 0;
+ struct xe_gt *gt = ct_to_gt(ct);
+ bool found = false;
+ unsigned int n;
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ char *buf;
+#endif
+
+ lockdep_assert_held(&ct->lock);
+
+ for (n = 0; n < ARRAY_SIZE(ct->fast_req); n++) {
+ if (ct->fast_req[n].fence < fence_min)
+ fence_min = ct->fast_req[n].fence;
+ if (ct->fast_req[n].fence > fence_max)
+ fence_max = ct->fast_req[n].fence;
+
+ if (ct->fast_req[n].fence != fence)
+ continue;
+ found = true;
+
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ buf = kmalloc(SZ_4K, GFP_NOWAIT);
+ if (buf && stack_depot_snprint(ct->fast_req[n].stack, buf, SZ_4K, 0))
+ xe_gt_err(gt, "Fence 0x%x was used by action %#04x sent at:\n%s",
+ fence, ct->fast_req[n].action, buf);
+ else
+ xe_gt_err(gt, "Fence 0x%x was used by action %#04x [failed to retrieve stack]\n",
+ fence, ct->fast_req[n].action);
+ kfree(buf);
+#else
+ xe_gt_err(gt, "Fence 0x%x was used by action %#04x\n",
+ fence, ct->fast_req[n].action);
+#endif
+ break;
+ }
+
+ if (!found)
+ xe_gt_warn(gt, "Fence 0x%x not found - tracking buffer wrapped? [range = 0x%x -> 0x%x, next = 0x%X]\n",
+ fence, fence_min, fence_max, ct->fence_seqno);
+}
+#else
+static void fast_req_report(struct xe_guc_ct *ct, u16 fence)
+{
+}
+#endif
+
static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{
struct xe_gt *gt = ct_to_gt(ct);
@@ -1171,6 +1251,9 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
else
xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n",
type, fence);
+
+ fast_req_report(ct, fence);
+
CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE);
return -EPROTO;
@@ -1344,7 +1427,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
u32 action;
u32 *hxg;
- xe_gt_assert(gt, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
+ xe_gt_assert(gt, xe_guc_ct_initialized(ct));
lockdep_assert_held(&ct->fast_lock);
if (ct->state == XE_GUC_CT_STATE_DISABLED)
@@ -1624,6 +1707,186 @@ static void g2h_worker_func(struct work_struct *w)
receive_g2h(ct);
}
+static void xe_fixup_u64_in_cmds(struct xe_device *xe, struct iosys_map *cmds,
+ u32 size, u32 idx, s64 shift)
+{
+ u32 hi, lo;
+ u64 offset;
+
+ lo = xe_map_rd_ring_u32(xe, cmds, idx, size);
+ hi = xe_map_rd_ring_u32(xe, cmds, idx + 1, size);
+ offset = make_u64(hi, lo);
+ offset += shift;
+ lo = lower_32_bits(offset);
+ hi = upper_32_bits(offset);
+ xe_map_wr_ring_u32(xe, cmds, idx, size, lo);
+ xe_map_wr_ring_u32(xe, cmds, idx + 1, size, hi);
+}
+
+/*
+ * Shift any GGTT addresses within a single message that was left in the
+ * CTB from before post-migration recovery.
+ * @ct: pointer to CT struct of the target GuC
+ * @cmds: iomap buffer containing CT messages
+ * @head: start of the target message within the buffer
+ * @len: length of the target message
+ * @size: size of the commands buffer
+ * @shift: the address shift to be added to each GGTT reference
+ * Return: true if the message was fixed or needed no fixups, false on failure
+ */
+static bool ct_fixup_ggtt_in_message(struct xe_guc_ct *ct,
+ struct iosys_map *cmds, u32 head,
+ u32 len, u32 size, s64 shift)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_device *xe = ct_to_xe(ct);
+ u32 msg[GUC_HXG_MSG_MIN_LEN];
+ u32 action, i, n;
+
+ xe_gt_assert(gt, len >= GUC_HXG_MSG_MIN_LEN);
+
+ msg[0] = xe_map_rd_ring_u32(xe, cmds, head, size);
+ action = FIELD_GET(GUC_HXG_REQUEST_MSG_0_ACTION, msg[0]);
+
+ xe_gt_sriov_dbg_verbose(gt, "fixing H2G %#x\n", action);
+
+ switch (action) {
+ case XE_GUC_ACTION_REGISTER_CONTEXT:
+ if (len != XE_GUC_REGISTER_CONTEXT_MSG_LEN)
+ goto err_len;
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER,
+ shift);
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER,
+ shift);
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR, shift);
+ break;
+ case XE_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC:
+ if (len < XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN)
+ goto err_len;
+ n = xe_map_rd_ring_u32(xe, cmds, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS, size);
+ if (len != XE_GUC_REGISTER_CONTEXT_MULTI_LRC_MSG_MIN_LEN + 2 * n)
+ goto err_len;
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER,
+ shift);
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER,
+ shift);
+ for (i = 0; i < n; i++)
+ xe_fixup_u64_in_cmds(xe, cmds, size, head +
+ XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR
+ + 2 * i, shift);
+ break;
+ default:
+ break;
+ }
+ return true;
+
+err_len:
+	xe_gt_err(gt, "Skipped H2G %#x message fixups, unexpected length (%u)\n", action, len);
+ return false;
+}
+
+/*
+ * Apply fixups to the next outgoing CT message within given CTB
+ * @ct: the &xe_guc_ct struct instance representing the target GuC
+ * @h2g: the &guc_ctb struct instance of the target buffer
+ * @shift: shift to be added to all GGTT addresses within the CTB
+ * @mhead: pointer to an integer storing the message start position; it is
+ * advanced to the next message before this function returns
+ * @avail: size of the area available for parsing, that is, the combined
+ * length of all remaining messages stored within the CTB
+ * Return: size of the area still available for parsing after one message
+ * has been parsed, that is, the length remaining from the updated *mhead
+ */
+static int ct_fixup_ggtt_in_buffer(struct xe_guc_ct *ct, struct guc_ctb *h2g,
+ s64 shift, u32 *mhead, s32 avail)
+{
+ struct xe_gt *gt = ct_to_gt(ct);
+ struct xe_device *xe = ct_to_xe(ct);
+ u32 msg[GUC_HXG_MSG_MIN_LEN];
+ u32 size = h2g->info.size;
+ u32 head = *mhead;
+ u32 len;
+
+ xe_gt_assert(gt, avail >= (s32)GUC_CTB_MSG_MIN_LEN);
+
+ /* Read header */
+ msg[0] = xe_map_rd_ring_u32(xe, &h2g->cmds, head, size);
+ len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, msg[0]) + GUC_CTB_MSG_MIN_LEN;
+
+ if (unlikely(len > (u32)avail)) {
+ xe_gt_err(gt, "H2G channel broken on read, avail=%d, len=%d, fixups skipped\n",
+ avail, len);
+ return 0;
+ }
+
+ head = (head + GUC_CTB_MSG_MIN_LEN) % size;
+ if (!ct_fixup_ggtt_in_message(ct, &h2g->cmds, head, msg_len_to_hxg_len(len), size, shift))
+ return 0;
+ *mhead = (head + msg_len_to_hxg_len(len)) % size;
+
+ return avail - len;
+}
+
+/**
+ * xe_guc_ct_fixup_messages_with_ggtt - Fixup any pending H2G CTB messages
+ * @ct: pointer to CT struct of the target GuC
+ * @ggtt_shift: shift to be added to all GGTT addresses within the CTB
+ *
+ * Messages in GuC to Host CTB are owned by GuC and any fixups in them
+ * are made by GuC. But content of the Host to GuC CTB is owned by the
+ * KMD, so fixups to GGTT references in any pending messages need to be
+ * applied here.
+ * This function updates GGTT offsets in payloads of pending H2G CTB
+ * messages (messages which were not consumed by GuC before the VF got
+ * paused).
+ */
+void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift)
+{
+ struct guc_ctb *h2g = &ct->ctbs.h2g;
+ struct xe_guc *guc = ct_to_guc(ct);
+ struct xe_gt *gt = guc_to_gt(guc);
+ u32 head, tail, size;
+ s32 avail;
+
+ if (unlikely(h2g->info.broken))
+ return;
+
+ h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
+ head = h2g->info.head;
+ tail = READ_ONCE(h2g->info.tail);
+ size = h2g->info.size;
+
+ if (unlikely(head > size))
+ goto corrupted;
+
+ if (unlikely(tail >= size))
+ goto corrupted;
+
+ avail = tail - head;
+
+ /* beware of buffer wrap case */
+ if (unlikely(avail < 0))
+ avail += size;
+ xe_gt_dbg(gt, "available %d (%u:%u:%u)\n", avail, head, tail, size);
+ xe_gt_assert(gt, avail >= 0);
+
+ while (avail > 0)
+ avail = ct_fixup_ggtt_in_buffer(ct, h2g, ggtt_shift, &head, avail);
+
+ return;
+
+corrupted:
+ xe_gt_err(gt, "Corrupted H2G descriptor head=%u tail=%u size=%u, fixups not applied\n",
+ head, tail, size);
+ h2g->info.broken = true;
+}
+
static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bool atomic,
bool want_ctb)
{
@@ -1770,6 +2033,24 @@ void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb)
}
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+
+#ifdef CONFIG_FUNCTION_ERROR_INJECTION
+/*
+ * Helper that lets the driver detect whether a fault-injection test is
+ * currently active, so it can suppress unnecessary debug output. It normally
+ * returns zero, but the fault injection framework can override the return
+ * value with an error. Since faults are injected through this function, the
+ * compiler must not optimize it into an inline function; the 'noinline'
+ * attribute prevents that.
+ */
+noinline int xe_is_injection_active(void) { return 0; }
+ALLOW_ERROR_INJECTION(xe_is_injection_active, ERRNO);
+#else
+int xe_is_injection_active(void) { return 0; }
+#endif
+
static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code)
{
struct xe_guc_log_snapshot *snapshot_log;
@@ -1780,6 +2061,12 @@ static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reaso
if (ctb)
ctb->info.broken = true;
+	/*
+	 * A huge dump is generated when an error is injected into the GuC
+	 * CT/MMIO functions, so suppress the dump while fault injection is
+	 * active.
+	 */
+ if (xe_is_injection_active())
+ return;
/* Ignore further errors after the first dump until a reset */
if (ct->dead.reported)
@@ -1830,7 +2117,6 @@ static void ct_dead_print(struct xe_dead_ct *dead)
return;
}
-
/* Can't generate a genuine core dump at this point, so just do the good bits */
drm_puts(&lp, "**** Xe Device Coredump ****\n");
drm_printf(&lp, "Reason: CTB is dead - 0x%X\n", dead->reason);
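
Two small mechanisms added to xe_guc_ct.c above are easy to miss in the diff: the 16-bit CT fence is split into a 15-bit rolling counter plus an "untracked" flag in bit 15, and (under CONFIG_DRM_XE_DEBUG) every FAST_REQ send is recorded in a small ring indexed by the fence modulo the ring size so a later G2H error can be matched back to the action that caused it. A standalone sketch of both, using illustrative names rather than the driver's API:

#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/types.h>

#define EXAMPLE_SEQNO_MASK	GENMASK(14, 0)
#define EXAMPLE_SEQNO_UNTRACKED	BIT(15)

struct example_req {
	u16 fence;
	u16 action;
};

static struct example_req example_ring[32];
static u32 example_next_seqno;

/* Allocate the next fence; FAST_REQ (untracked) fences get bit 15 set. */
static u16 example_alloc_fence(bool tracked)
{
	u16 seqno = example_next_seqno++ & EXAMPLE_SEQNO_MASK;

	return tracked ? seqno : (seqno | EXAMPLE_SEQNO_UNTRACKED);
}

/* Record the action behind a FAST_REQ fence in a small history ring. */
static void example_track(u16 fence, u16 action)
{
	unsigned int slot = fence % ARRAY_SIZE(example_ring);

	example_ring[slot].fence = fence;
	example_ring[slot].action = action;
}
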
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h
index 82c4ae458dda..99c5dec446f2 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct.h
@@ -22,6 +22,13 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_pr
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot);
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool want_ctb);
+void xe_guc_ct_fixup_messages_with_ggtt(struct xe_guc_ct *ct, s64 ggtt_shift);
+
+static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct)
+{
+ return ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED;
+}
+
static inline bool xe_guc_ct_enabled(struct xe_guc_ct *ct)
{
return ct->state == XE_GUC_CT_STATE_ENABLED;
diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h
index 8e1b9d981d61..8b03b50313d9 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h
@@ -9,6 +9,7 @@
#include <linux/interrupt.h>
#include <linux/iosys-map.h>
#include <linux/spinlock_types.h>
+#include <linux/stackdepot.h>
#include <linux/wait.h>
#include <linux/xarray.h>
@@ -104,6 +105,18 @@ struct xe_dead_ct {
/** snapshot_log: copy of GuC log at point of error */
struct xe_guc_log_snapshot *snapshot_log;
};
+
+/** struct xe_fast_req_fence - Used to track FAST_REQ messages by fence to match error responses */
+struct xe_fast_req_fence {
+	/** @fence: sequence number sent in H2G and returned in the G2H error */
+ u16 fence;
+ /** @action: H2G action code */
+ u16 action;
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
+ /** @stack: call stack from when the H2G was sent */
+ depot_stack_handle_t stack;
+#endif
+};
#endif
/**
@@ -152,6 +165,8 @@ struct xe_guc_ct {
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
/** @dead: information for debugging dead CTs */
struct xe_dead_ct dead;
+ /** @fast_req: history of FAST_REQ messages for matching with G2H error responses */
+ struct xe_fast_req_fence fast_req[SZ_32];
#endif
};
diff --git a/drivers/gpu/drm/xe/xe_guc_engine_activity.c b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
index 0fb48f8f05d8..92e1f9f41b8c 100644
--- a/drivers/gpu/drm/xe/xe_guc_engine_activity.c
+++ b/drivers/gpu/drm/xe/xe_guc_engine_activity.c
@@ -124,7 +124,7 @@ static void free_engine_activity_buffers(struct engine_activity_buffer *buffer)
static bool is_engine_activity_supported(struct xe_guc *guc)
{
struct xe_uc_fw_version *version = &guc->fw.versions.found[XE_UC_FW_VER_COMPATIBILITY];
- struct xe_uc_fw_version required = { 1, 14, 1 };
+ struct xe_uc_fw_version required = { .major = 1, .minor = 14, .patch = 1 };
struct xe_gt *gt = guc_to_gt(guc);
if (IS_SRIOV_VF(gt_to_xe(gt))) {
diff --git a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
index 4c39f01e4f52..a3f421e2adc0 100644
--- a/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_exec_queue_types.h
@@ -20,6 +20,8 @@ struct xe_exec_queue;
struct xe_guc_exec_queue {
/** @q: Backpointer to parent xe_exec_queue */
struct xe_exec_queue *q;
+ /** @rcu: For safe freeing of exported dma fences */
+ struct rcu_head rcu;
/** @sched: GPU scheduler for this xe_exec_queue */
struct xe_gpu_scheduler sched;
/** @entity: Scheduler entity for this xe_exec_queue */
diff --git a/drivers/gpu/drm/xe/xe_guc_log.h b/drivers/gpu/drm/xe/xe_guc_log.h
index 5b896f5fafaf..f1e2b0be90a9 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.h
+++ b/drivers/gpu/drm/xe/xe_guc_log.h
@@ -12,7 +12,7 @@
struct drm_printer;
struct xe_device;
-#if IS_ENABLED(CONFIG_DRM_XE_LARGE_GUC_BUFFER)
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_GUC)
#define CRASH_BUFFER_SIZE SZ_1M
#define DEBUG_BUFFER_SIZE SZ_8M
#define CAPTURE_BUFFER_SIZE SZ_2M
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index 18c623992035..9fab5f5b10fa 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -51,6 +51,7 @@
#define LNL_MERT_FREQ_CAP 800
#define BMG_MERT_FREQ_CAP 2133
+#define BMG_MIN_FREQ 1200
#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */
#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */
@@ -153,7 +154,7 @@ static int pc_action_reset(struct xe_guc_pc *pc)
int ret;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC reset failed: %pe\n",
ERR_PTR(ret));
@@ -177,7 +178,7 @@ static int pc_action_query_task_state(struct xe_guc_pc *pc)
/* Blocking here to ensure the results are ready before reading them */
ret = xe_guc_ct_send_block(ct, action, ARRAY_SIZE(action));
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC query task state failed: %pe\n",
ERR_PTR(ret));
@@ -200,7 +201,7 @@ static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value)
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC set param[%u]=%u failed: %pe\n",
id, value, ERR_PTR(ret));
@@ -222,7 +223,7 @@ static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id)
return -EAGAIN;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC PC unset param failed: %pe",
ERR_PTR(ret));
@@ -239,7 +240,7 @@ static int pc_action_setup_gucrc(struct xe_guc_pc *pc, u32 mode)
int ret;
ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
- if (ret)
+ if (ret && !(xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED))
xe_gt_err(pc_to_gt(pc), "GuC RC enable mode=%u failed: %pe\n",
mode, ERR_PTR(ret));
return ret;
@@ -817,6 +818,7 @@ void xe_guc_pc_init_early(struct xe_guc_pc *pc)
static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
{
+ struct xe_tile *tile = gt_to_tile(pc_to_gt(pc));
int ret;
lockdep_assert_held(&pc->freq_lock);
@@ -843,6 +845,9 @@ static int pc_adjust_freq_bounds(struct xe_guc_pc *pc)
if (pc_get_min_freq(pc) > pc->rp0_freq)
ret = pc_set_min_freq(pc, pc->rp0_freq);
+ if (XE_WA(tile->primary_gt, 14022085890))
+ ret = pc_set_min_freq(pc, max(BMG_MIN_FREQ, pc_get_min_freq(pc)));
+
out:
return ret;
}
@@ -1068,7 +1073,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc)
goto out;
}
- memset(pc->bo->vmap.vaddr, 0, size);
+ xe_map_memset(xe, &pc->bo->vmap, 0, 0, size);
slpc_shared_data_write(pc, header.size, size);
earlier = ktime_get();
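
The xe_guc_pc.c changes repeat the same guard in every pc_action_*() error path: a -ECANCELED result is expected once the device is wedged (outstanding CT sends get cancelled), so only other failures are logged. A hypothetical helper that would centralize that check, sketched against the existing pc_to_xe()/xe_device_wedged() helpers and not part of the patch:

/*
 * Hypothetical consolidation of the repeated error-suppression check in
 * the pc_action_*() paths above.
 */
static bool pc_action_failed(struct xe_guc_pc *pc, int ret)
{
	if (!ret)
		return false;

	/* A wedged device cancels in-flight CT sends; don't log those. */
	if (xe_device_wedged(pc_to_xe(pc)) && ret == -ECANCELED)
		return false;

	return true;
}

Call sites would then read: if (pc_action_failed(pc, ret)) xe_gt_err(...);
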
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 6d84a52b660a..df7a5a4eec74 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -498,6 +498,15 @@ static void __register_mlrc_exec_queue(struct xe_guc *guc,
action[len++] = upper_32_bits(xe_lrc_descriptor(lrc));
}
+	/* explicitly check some fields that we might fix up later */
+ xe_gt_assert(guc_to_gt(guc), info->wq_desc_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_5_WQ_DESC_ADDR_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), info->wq_base_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_7_WQ_BUF_BASE_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), q->width ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_10_NUM_CTXS]);
+ xe_gt_assert(guc_to_gt(guc), info->hwlrca_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_MULTI_LRC_DATA_11_HW_LRC_ADDR]);
xe_gt_assert(guc_to_gt(guc), len <= MAX_MLRC_REG_SIZE);
#undef MAX_MLRC_REG_SIZE
@@ -522,6 +531,14 @@ static void __register_exec_queue(struct xe_guc *guc,
info->hwlrca_hi,
};
+	/* explicitly check some fields that we might fix up later */
+ xe_gt_assert(guc_to_gt(guc), info->wq_desc_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_DATA_5_WQ_DESC_ADDR_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), info->wq_base_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_DATA_7_WQ_BUF_BASE_LOWER]);
+ xe_gt_assert(guc_to_gt(guc), info->hwlrca_lo ==
+ action[XE_GUC_REGISTER_CONTEXT_DATA_10_HW_LRC_ADDR]);
+
xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}
@@ -970,10 +987,7 @@ static bool check_timeout(struct xe_exec_queue *q, struct xe_sched_job *job)
*/
xe_gt_assert(gt, timeout_ms < 100 * MSEC_PER_SEC);
- if (ctx_timestamp < ctx_job_timestamp)
- diff = ctx_timestamp + U32_MAX - ctx_job_timestamp;
- else
- diff = ctx_timestamp - ctx_job_timestamp;
+ diff = ctx_timestamp - ctx_job_timestamp;
/*
* Ensure timeout is within 5% to account for an GuC scheduling latency
@@ -1285,7 +1299,11 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
- kfree(ge);
+	/*
+	 * Free via RCU because the scheduler is exported through DRM
+	 * scheduler fences (timeline name).
+	 */
+ kfree_rcu(ge, rcu);
xe_exec_queue_fini(q);
xe_pm_runtime_put(guc_to_xe(guc));
}
@@ -1468,6 +1486,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
q->guc = ge;
ge->q = q;
+ init_rcu_head(&ge->rcu);
init_waitqueue_head(&ge->suspend_wait);
for (i = 0; i < MAX_STATIC_MSG_TYPE; ++i)
@@ -1762,6 +1781,9 @@ int xe_guc_submit_reset_prepare(struct xe_guc *guc)
{
int ret;
+ if (!guc->submission_state.initialized)
+ return 0;
+
/*
* Using an atomic here rather than submission_state.lock as this
* function can be called while holding the CT lock (engine reset
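
The check_timeout() change above drops the explicit wraparound branch because, with u32 operands, the subtraction is already performed modulo 2^32 and therefore yields the elapsed tick count across a single counter wrap. A tiny illustration (values are arbitrary):

#include <linux/types.h>

static u32 example_elapsed(u32 now, u32 then)
{
	return now - then;	/* correct across a single 32-bit wrap */
}

/* example_elapsed(0x00000005, 0xfffffffb) == 10 */
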
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 93241fd0a4ba..3439c8522d01 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -17,6 +17,7 @@
#include "regs/xe_irq_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
+#include "xe_configfs.h"
#include "xe_device.h"
#include "xe_execlist.h"
#include "xe_force_wake.h"
@@ -693,7 +694,7 @@ static void read_media_fuses(struct xe_gt *gt)
if (!(BIT(j) & vdbox_mask)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "vcs%u fused off\n", j);
+ xe_gt_info(gt, "vcs%u fused off\n", j);
}
}
@@ -703,7 +704,7 @@ static void read_media_fuses(struct xe_gt *gt)
if (!(BIT(j) & vebox_mask)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "vecs%u fused off\n", j);
+ xe_gt_info(gt, "vecs%u fused off\n", j);
}
}
}
@@ -728,15 +729,13 @@ static void read_copy_fuses(struct xe_gt *gt)
if (!(BIT(j / 2) & bcs_mask)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "bcs%u fused off\n", j);
+ xe_gt_info(gt, "bcs%u fused off\n", j);
}
}
}
static void read_compute_fuses_from_dss(struct xe_gt *gt)
{
- struct xe_device *xe = gt_to_xe(gt);
-
/*
* CCS fusing based on DSS masks only applies to platforms that can
* have more than one CCS.
@@ -755,14 +754,13 @@ static void read_compute_fuses_from_dss(struct xe_gt *gt)
if (!xe_gt_topology_has_dss_in_quadrant(gt, j)) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "ccs%u fused off\n", j);
+ xe_gt_info(gt, "ccs%u fused off\n", j);
}
}
}
static void read_compute_fuses_from_reg(struct xe_gt *gt)
{
- struct xe_device *xe = gt_to_xe(gt);
u32 ccs_mask;
ccs_mask = xe_mmio_read32(&gt->mmio, XEHP_FUSE4);
@@ -774,7 +772,7 @@ static void read_compute_fuses_from_reg(struct xe_gt *gt)
if ((ccs_mask & BIT(j)) == 0) {
gt->info.engine_mask &= ~BIT(i);
- drm_info(&xe->drm, "ccs%u fused off\n", j);
+ xe_gt_info(gt, "ccs%u fused off\n", j);
}
}
}
@@ -789,8 +787,6 @@ static void read_compute_fuses(struct xe_gt *gt)
static void check_gsc_availability(struct xe_gt *gt)
{
- struct xe_device *xe = gt_to_xe(gt);
-
if (!(gt->info.engine_mask & BIT(XE_HW_ENGINE_GSCCS0)))
return;
@@ -806,7 +802,25 @@ static void check_gsc_availability(struct xe_gt *gt)
xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_ENABLE, 0);
xe_mmio_write32(&gt->mmio, GUNIT_GSC_INTR_MASK, ~0);
- drm_dbg(&xe->drm, "GSC FW not used, disabling gsccs\n");
+ xe_gt_dbg(gt, "GSC FW not used, disabling gsccs\n");
+ }
+}
+
+static void check_sw_disable(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ u64 sw_allowed = xe_configfs_get_engines_allowed(to_pci_dev(xe->drm.dev));
+ enum xe_hw_engine_id id;
+
+ for (id = 0; id < XE_NUM_HW_ENGINES; ++id) {
+ if (!(gt->info.engine_mask & BIT(id)))
+ continue;
+
+ if (!(sw_allowed & BIT(id))) {
+ gt->info.engine_mask &= ~BIT(id);
+ xe_gt_info(gt, "%s disabled via configfs\n",
+ engine_infos[id].name);
+ }
}
}
@@ -818,6 +832,7 @@ int xe_hw_engines_init_early(struct xe_gt *gt)
read_copy_fuses(gt);
read_compute_fuses(gt);
check_gsc_availability(gt);
+ check_sw_disable(gt);
BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT < XE_HW_ENGINE_PREEMPT_TIMEOUT_MIN);
BUILD_BUG_ON(XE_HW_ENGINE_PREEMPT_TIMEOUT > XE_HW_ENGINE_PREEMPT_TIMEOUT_MAX);
diff --git a/drivers/gpu/drm/xe/xe_hw_fence.c b/drivers/gpu/drm/xe/xe_hw_fence.c
index 0b4f12be3692..b2a0c46dfcd4 100644
--- a/drivers/gpu/drm/xe/xe_hw_fence.c
+++ b/drivers/gpu/drm/xe/xe_hw_fence.c
@@ -100,6 +100,9 @@ void xe_hw_fence_irq_finish(struct xe_hw_fence_irq *irq)
spin_unlock_irqrestore(&irq->lock, flags);
dma_fence_end_signalling(tmp);
}
+
+ /* Safe release of the irq->lock used in dma_fence_init. */
+ synchronize_rcu();
}
void xe_hw_fence_irq_run(struct xe_hw_fence_irq *irq)
@@ -165,7 +168,7 @@ static bool xe_hw_fence_signaled(struct dma_fence *dma_fence)
u32 seqno = xe_map_rd(xe, &fence->seqno_map, 0, u32);
return dma_fence->error ||
- !__dma_fence_is_later(dma_fence->seqno, seqno, dma_fence->ops);
+ !__dma_fence_is_later(dma_fence, dma_fence->seqno, seqno);
}
static bool xe_hw_fence_enable_signaling(struct dma_fence *dma_fence)
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 74f31639b37f..f08fc4377d25 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -20,6 +20,8 @@
#include "xe_pcode_api.h"
#include "xe_sriov.h"
#include "xe_pm.h"
+#include "xe_vsec.h"
+#include "regs/xe_pmt.h"
enum xe_hwmon_reg {
REG_TEMP,
@@ -51,6 +53,14 @@ enum xe_fan_channel {
FAN_MAX,
};
+/* Attribute index for powerX_xxx_interval sysfs entries */
+enum sensor_attr_power {
+ SENSOR_INDEX_PSYS_PL1,
+ SENSOR_INDEX_PKG_PL1,
+ SENSOR_INDEX_PSYS_PL2,
+ SENSOR_INDEX_PKG_PL2,
+};
+
/*
* For platforms that support mailbox commands for power limits, REG_PKG_POWER_SKU_UNIT is
* not supported and below are SKU units to be used.
@@ -72,8 +82,9 @@ enum xe_fan_channel {
* PL*_HWMON_ATTR - mapping of hardware power limits to corresponding hwmon power attribute.
*/
#define PL1_HWMON_ATTR hwmon_power_max
+#define PL2_HWMON_ATTR hwmon_power_cap
-#define PWR_ATTR_TO_STR(attr) (((attr) == hwmon_power_max) ? "PL1" : "Invalid")
+#define PWR_ATTR_TO_STR(attr) (((attr) == hwmon_power_max) ? "PL1" : "PL2")
/*
* Timeout for power limit write mailbox command.
@@ -124,6 +135,9 @@ struct xe_hwmon {
bool boot_power_limit_read;
/** @pl1_on_boot: power limit PL1 on boot */
u32 pl1_on_boot[CHANNEL_MAX];
+ /** @pl2_on_boot: power limit PL2 on boot */
+ u32 pl2_on_boot[CHANNEL_MAX];
+
};
static int xe_hwmon_pcode_read_power_limit(const struct xe_hwmon *hwmon, u32 attr, int channel,
@@ -151,16 +165,18 @@ static int xe_hwmon_pcode_read_power_limit(const struct xe_hwmon *hwmon, u32 att
/* return the value only if limit is enabled */
if (attr == PL1_HWMON_ATTR)
*uval = (val0 & PWR_LIM_EN) ? val0 : 0;
+ else if (attr == PL2_HWMON_ATTR)
+ *uval = (val1 & PWR_LIM_EN) ? val1 : 0;
else if (attr == hwmon_power_label)
- *uval = (val0 & PWR_LIM_EN) ? 1 : 0;
+ *uval = (val0 & PWR_LIM_EN) ? 1 : (val1 & PWR_LIM_EN) ? 1 : 0;
else
*uval = 0;
return ret;
}
-static int xe_hwmon_pcode_write_power_limit(const struct xe_hwmon *hwmon, u32 attr, u8 channel,
- u32 uval)
+static int xe_hwmon_pcode_rmw_power_limit(const struct xe_hwmon *hwmon, u32 attr, u8 channel,
+ u32 clr, u32 set)
{
struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
u32 val0, val1;
@@ -179,7 +195,9 @@ static int xe_hwmon_pcode_write_power_limit(const struct xe_hwmon *hwmon, u32 at
channel, val0, val1, ret);
if (attr == PL1_HWMON_ATTR)
- val0 = uval;
+ val0 = (val0 & ~clr) | set;
+ else if (attr == PL2_HWMON_ATTR)
+ val1 = (val1 & ~clr) | set;
else
return -EIO;
@@ -236,12 +254,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
return GT_PERF_STATUS;
break;
case REG_PKG_ENERGY_STATUS:
- if (xe->info.platform == XE_BATTLEMAGE) {
- if (channel == CHANNEL_PKG)
- return BMG_PACKAGE_ENERGY_STATUS;
- else
- return BMG_PLATFORM_ENERGY_STATUS;
- } else if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
+ if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
return PVC_GT0_PLATFORM_ENERGY_STATUS;
} else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
return PCU_CR_PACKAGE_ENERGY_STATUS;
@@ -273,7 +286,7 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
*/
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value)
{
- u64 reg_val, min, max;
+ u64 reg_val = 0, min, max;
struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
@@ -285,16 +298,6 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channe
} else {
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
-
- /*
- * Valid check of REG_PKG_RAPL_LIMIT is already done in xe_hwmon_power_is_visible.
- * So not checking it again here.
- */
- if (!xe_reg_is_valid(pkg_power_sku)) {
- drm_warn(&xe->drm, "pkg_power_sku invalid\n");
- *value = 0;
- goto unlock;
- }
reg_val = xe_mmio_read32(mmio, rapl_limit);
}
@@ -327,7 +330,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
{
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
int ret = 0;
- u32 reg_val;
+ u32 reg_val, max;
struct xe_reg rapl_limit;
mutex_lock(&hwmon->hwmon_lock);
@@ -339,7 +342,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
if (hwmon->xe->info.has_mbx_power_limits) {
drm_dbg(&hwmon->xe->drm, "disabling %s on channel %d\n",
PWR_ATTR_TO_STR(attr), channel);
- xe_hwmon_pcode_write_power_limit(hwmon, attr, channel, 0);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM_EN, 0);
xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &reg_val);
} else {
reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM_EN, 0);
@@ -355,25 +358,29 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe
/* Computation in 64-bits to avoid overflow. Round to nearest. */
reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
- reg_val = PWR_LIM_EN | REG_FIELD_PREP(PWR_LIM_VAL, reg_val);
/*
- * Clamp power limit to card-firmware default as maximum, as an additional protection to
+ * Clamp power limit to GPU firmware default as maximum, as an additional protection to
* pcode clamp.
*/
if (hwmon->xe->info.has_mbx_power_limits) {
- if (reg_val > REG_FIELD_GET(PWR_LIM_VAL, hwmon->pl1_on_boot[channel])) {
- reg_val = REG_FIELD_GET(PWR_LIM_VAL, hwmon->pl1_on_boot[channel]);
- drm_dbg(&hwmon->xe->drm, "Clamping power limit to firmware default 0x%x\n",
+ max = (attr == PL1_HWMON_ATTR) ?
+ hwmon->pl1_on_boot[channel] : hwmon->pl2_on_boot[channel];
+ max = REG_FIELD_PREP(PWR_LIM_VAL, max);
+ if (reg_val > max) {
+ reg_val = max;
+ drm_dbg(&hwmon->xe->drm,
+ "Clamping power limit to GPU firmware default 0x%x\n",
reg_val);
}
}
+ reg_val = PWR_LIM_EN | REG_FIELD_PREP(PWR_LIM_VAL, reg_val);
+
if (hwmon->xe->info.has_mbx_power_limits)
- ret = xe_hwmon_pcode_write_power_limit(hwmon, attr, channel, reg_val);
+ ret = xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM, reg_val);
else
- reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM_EN | PWR_LIM_VAL,
- reg_val);
+ reg_val = xe_mmio_rmw32(mmio, rapl_limit, PWR_LIM, reg_val);
unlock:
mutex_unlock(&hwmon->hwmon_lock);
return ret;
@@ -428,16 +435,37 @@ xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
- u64 reg_val;
+ u32 reg_val;
+ int ret = 0;
- reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
- channel));
+ /* Energy is supported only for card and pkg */
+ if (channel > CHANNEL_PKG) {
+ *energy = 0;
+ return;
+ }
- if (reg_val >= ei->reg_val_prev)
- ei->accum_energy += reg_val - ei->reg_val_prev;
- else
- ei->accum_energy += UINT_MAX - ei->reg_val_prev + reg_val;
+ if (hwmon->xe->info.platform == XE_BATTLEMAGE) {
+ u64 pmt_val;
+
+ ret = xe_pmt_telem_read(to_pci_dev(hwmon->xe->drm.dev),
+ xe_mmio_read32(mmio, PUNIT_TELEMETRY_GUID),
+ &pmt_val, BMG_ENERGY_STATUS_PMT_OFFSET, sizeof(pmt_val));
+ if (ret != sizeof(pmt_val)) {
+ drm_warn(&hwmon->xe->drm, "energy read from pmt failed, ret %d\n", ret);
+ *energy = 0;
+ return;
+ }
+
+ if (channel == CHANNEL_PKG)
+ reg_val = REG_FIELD_GET64(ENERGY_PKG, pmt_val);
+ else
+ reg_val = REG_FIELD_GET64(ENERGY_CARD, pmt_val);
+ } else {
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
+ channel));
+ }
+ ei->accum_energy += reg_val - ei->reg_val_prev;
ei->reg_val_prev = reg_val;
*energy = mul_u64_u32_shr(ei->accum_energy, SF_ENERGY,
@@ -452,8 +480,9 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
u32 x, y, x_w = 2; /* 2 bits */
u64 r, tau4, out;
- int channel = to_sensor_dev_attr(attr)->index;
- u32 power_attr = PL1_HWMON_ATTR;
+ int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
+ u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
+
int ret = 0;
xe_pm_runtime_get(hwmon->xe);
@@ -506,9 +535,9 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
u32 x, y, rxy, x_w = 2; /* 2 bits */
u64 tau4, r, max_win;
unsigned long val;
+ int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
+ u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
int ret;
- int channel = to_sensor_dev_attr(attr)->index;
- u32 power_attr = PL1_HWMON_ATTR;
ret = kstrtoul(buf, 0, &val);
if (ret)
@@ -535,10 +564,8 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
- if (val > max_win) {
- drm_warn(&hwmon->xe->drm, "power_interval invalid val 0x%lx\n", val);
+ if (val > max_win)
return -EINVAL;
- }
/* val in hw units */
val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME) + 1;
@@ -563,14 +590,11 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
mutex_lock(&hwmon->hwmon_lock);
- if (hwmon->xe->info.has_mbx_power_limits) {
- ret = xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, (u32 *)&r);
- r = (r & ~PWR_LIM_TIME) | rxy;
- xe_hwmon_pcode_write_power_limit(hwmon, power_attr, channel, r);
- } else {
+ if (hwmon->xe->info.has_mbx_power_limits)
+ xe_hwmon_pcode_rmw_power_limit(hwmon, power_attr, channel, PWR_LIM_TIME, rxy);
+ else
r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel),
PWR_LIM_TIME, rxy);
- }
mutex_unlock(&hwmon->hwmon_lock);
@@ -582,15 +606,25 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
/* PSYS PL1 */
static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
xe_hwmon_power_max_interval_show,
- xe_hwmon_power_max_interval_store, CHANNEL_CARD);
-
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL1);
+/* PKG PL1 */
static SENSOR_DEVICE_ATTR(power2_max_interval, 0664,
xe_hwmon_power_max_interval_show,
- xe_hwmon_power_max_interval_store, CHANNEL_PKG);
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL1);
+/* PSYS PL2 */
+static SENSOR_DEVICE_ATTR(power1_cap_interval, 0664,
+ xe_hwmon_power_max_interval_show,
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL2);
+/* PKG PL2 */
+static SENSOR_DEVICE_ATTR(power2_cap_interval, 0664,
+ xe_hwmon_power_max_interval_show,
+ xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL2);
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_power1_max_interval.dev_attr.attr,
&sensor_dev_attr_power2_max_interval.dev_attr.attr,
+ &sensor_dev_attr_power1_cap_interval.dev_attr.attr,
+ &sensor_dev_attr_power2_cap_interval.dev_attr.attr,
NULL
};
@@ -600,19 +634,22 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret = 0;
- int channel = index ? CHANNEL_PKG : CHANNEL_CARD;
- u32 power_attr = PL1_HWMON_ATTR;
- u32 uval;
+ int channel = (index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
+ u32 power_attr = (index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
+ u32 uval = 0;
+ struct xe_reg rapl_limit;
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
xe_pm_runtime_get(hwmon->xe);
if (hwmon->xe->info.has_mbx_power_limits) {
xe_hwmon_pcode_read_power_limit(hwmon, power_attr, channel, &uval);
- ret = (uval & PWR_LIM_EN) ? attr->mode : 0;
- } else {
- ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
- channel)) ? attr->mode : 0;
+ } else if (power_attr != PL2_HWMON_ATTR) {
+ rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
+ if (xe_reg_is_valid(rapl_limit))
+ uval = xe_mmio_read32(mmio, rapl_limit);
}
+ ret = (uval & PWR_LIM_EN) ? attr->mode : 0;
xe_pm_runtime_put(hwmon->xe);
@@ -632,8 +669,9 @@ static const struct attribute_group *hwmon_groups[] = {
static const struct hwmon_channel_info * const hwmon_info[] = {
HWMON_CHANNEL_INFO(temp, HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
HWMON_T_INPUT | HWMON_T_LABEL),
- HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CRIT,
- HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL),
+ HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CRIT |
+ HWMON_P_CAP,
+ HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CAP),
HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
@@ -754,31 +792,62 @@ xe_hwmon_temp_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
static umode_t
xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
- u32 uval;
+ u32 uval = 0;
+ struct xe_reg reg;
+ struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
switch (attr) {
case hwmon_power_max:
+ case hwmon_power_cap:
if (hwmon->xe->info.has_mbx_power_limits) {
xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval);
- return (uval) ? 0664 : 0;
- } else {
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT,
- channel)) ? 0664 : 0;
+ } else if (attr != PL2_HWMON_ATTR) {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
}
+ if (uval & PWR_LIM_EN) {
+ drm_info(&hwmon->xe->drm, "%s is supported on channel %d\n",
+ PWR_ATTR_TO_STR(attr), channel);
+ return 0664;
+ }
+ drm_dbg(&hwmon->xe->drm, "%s is unsupported on channel %d\n",
+ PWR_ATTR_TO_STR(attr), channel);
+ return 0;
case hwmon_power_rated_max:
- if (hwmon->xe->info.has_mbx_power_limits)
+ if (hwmon->xe->info.has_mbx_power_limits) {
return 0;
- else
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU,
- channel)) ? 0444 : 0;
+ } else {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
+ return uval ? 0444 : 0;
+ }
case hwmon_power_crit:
- case hwmon_power_label:
if (channel == CHANNEL_CARD) {
xe_hwmon_pcode_read_i1(hwmon, &uval);
- return (uval & POWER_SETUP_I1_WATTS) ? (attr == hwmon_power_label) ?
- 0444 : 0644 : 0;
+ return (uval & POWER_SETUP_I1_WATTS) ? 0644 : 0;
}
break;
+ case hwmon_power_label:
+ if (hwmon->xe->info.has_mbx_power_limits) {
+ xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, &uval);
+ } else {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
+
+ if (!uval) {
+ reg = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
+ if (xe_reg_is_valid(reg))
+ uval = xe_mmio_read32(mmio, reg);
+ }
+ }
+ if ((!(uval & PWR_LIM_EN)) && channel == CHANNEL_CARD) {
+ xe_hwmon_pcode_read_i1(hwmon, &uval);
+ return (uval & POWER_SETUP_I1_WATTS) ? 0444 : 0;
+ }
+ return (uval) ? 0444 : 0;
default:
return 0;
}
@@ -790,6 +859,7 @@ xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
{
switch (attr) {
case hwmon_power_max:
+ case hwmon_power_cap:
xe_hwmon_power_max_read(hwmon, attr, channel, val);
return 0;
case hwmon_power_rated_max:
@@ -806,6 +876,7 @@ static int
xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
{
switch (attr) {
+ case hwmon_power_cap:
case hwmon_power_max:
return xe_hwmon_power_max_write(hwmon, attr, channel, val);
case hwmon_power_crit:
@@ -888,11 +959,18 @@ xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
static umode_t
xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
{
+ long energy = 0;
+
switch (attr) {
case hwmon_energy_input:
case hwmon_energy_label:
- return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
- channel)) ? 0444 : 0;
+ if (hwmon->xe->info.platform == XE_BATTLEMAGE) {
+ xe_hwmon_energy_get(hwmon, channel, &energy);
+ return energy ? 0444 : 0;
+ } else {
+ return xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
+ channel)) ? 0444 : 0;
+ }
default:
return 0;
}
@@ -1128,22 +1206,32 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
struct xe_reg pkg_power_sku_unit;
if (hwmon->xe->info.has_mbx_power_limits) {
- /* Check if card firmware support mailbox power limits commands. */
+		/* Check if GPU firmware supports mailbox power limit commands. */
if (xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_CARD,
&hwmon->pl1_on_boot[CHANNEL_CARD]) |
xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_PKG,
- &hwmon->pl1_on_boot[CHANNEL_PKG])) {
+ &hwmon->pl1_on_boot[CHANNEL_PKG]) |
+ xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_CARD,
+ &hwmon->pl2_on_boot[CHANNEL_CARD]) |
+ xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_PKG,
+ &hwmon->pl2_on_boot[CHANNEL_PKG])) {
drm_warn(&hwmon->xe->drm,
- "Failed to read power limits, check card firmware !\n");
+ "Failed to read power limits, check GPU firmware !\n");
} else {
drm_info(&hwmon->xe->drm, "Using mailbox commands for power limits\n");
/* Write default limits to read from pcode from now on. */
- xe_hwmon_pcode_write_power_limit(hwmon, PL1_HWMON_ATTR,
- CHANNEL_CARD,
- hwmon->pl1_on_boot[CHANNEL_CARD]);
- xe_hwmon_pcode_write_power_limit(hwmon, PL1_HWMON_ATTR,
- CHANNEL_PKG,
- hwmon->pl1_on_boot[CHANNEL_PKG]);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR,
+ CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl1_on_boot[CHANNEL_CARD]);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR,
+ CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl1_on_boot[CHANNEL_PKG]);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR,
+ CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl2_on_boot[CHANNEL_CARD]);
+ xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR,
+ CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME,
+ hwmon->pl2_on_boot[CHANNEL_PKG]);
hwmon->scl_shift_power = PWR_UNIT;
hwmon->scl_shift_energy = ENERGY_UNIT;
hwmon->scl_shift_time = TIME_UNIT;
@@ -1227,4 +1315,4 @@ int xe_hwmon_register(struct xe_device *xe)
return 0;
}
-
+MODULE_IMPORT_NS("INTEL_PMT_TELEMETRY");
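
The new powerX_cap_interval attributes reuse one show/store pair by encoding both the channel and the power limit in the sensor attribute index: even indices map to the card/psys channel and odd ones to the package channel, while indices above 1 select PL2 instead of PL1. A decode sketch mirroring the SENSOR_INDEX_* enum added above (names here are illustrative):

enum { EX_PSYS_PL1, EX_PKG_PL1, EX_PSYS_PL2, EX_PKG_PL2 };

static void example_decode(int index, int *channel, int *is_pl2)
{
	*channel = index % 2;	/* 0 = card/psys, 1 = package */
	*is_pl2 = index > 1;	/* PL2 for the *_cap_interval attributes */
}
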
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 63d74e27f54c..37598588a54f 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -40,6 +40,7 @@
#define LRC_PPHWSP_SIZE SZ_4K
#define LRC_INDIRECT_RING_STATE_SIZE SZ_4K
+#define LRC_WA_BB_SIZE SZ_4K
static struct xe_device *
lrc_to_xe(struct xe_lrc *lrc)
@@ -654,8 +655,8 @@ u32 xe_lrc_pphwsp_offset(struct xe_lrc *lrc)
#define LRC_SEQNO_PPHWSP_OFFSET 512
#define LRC_START_SEQNO_PPHWSP_OFFSET (LRC_SEQNO_PPHWSP_OFFSET + 8)
#define LRC_CTX_JOB_TIMESTAMP_OFFSET (LRC_START_SEQNO_PPHWSP_OFFSET + 8)
+#define LRC_ENGINE_ID_PPHWSP_OFFSET 1024
#define LRC_PARALLEL_PPHWSP_OFFSET 2048
-#define LRC_ENGINE_ID_PPHWSP_OFFSET 2096
u32 xe_lrc_regs_offset(struct xe_lrc *lrc)
{
@@ -910,13 +911,11 @@ static void xe_lrc_finish(struct xe_lrc *lrc)
{
xe_hw_fence_ctx_finish(&lrc->fence_ctx);
xe_bo_unpin_map_no_vm(lrc->bo);
- xe_bo_unpin_map_no_vm(lrc->bb_per_ctx_bo);
}
/*
- * xe_lrc_setup_utilization() - Setup wa bb to assist in calculating active
- * context run ticks.
- * @lrc: Pointer to the lrc.
+ * wa_bb_setup_utilization() - Write commands to wa bb to assist
+ * in calculating active context run ticks.
*
* Context Timestamp (CTX_TIMESTAMP) in the LRC accumulates the run ticks of the
* context, but only gets updated when the context switches out. In order to
@@ -941,11 +940,13 @@ static void xe_lrc_finish(struct xe_lrc *lrc)
* store it in the PPHSWP.
*/
#define CONTEXT_ACTIVE 1ULL
-static void xe_lrc_setup_utilization(struct xe_lrc *lrc)
+static ssize_t wa_bb_setup_utilization(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_len)
{
- u32 *cmd;
+ u32 *cmd = batch;
- cmd = lrc->bb_per_ctx_bo->vmap.vaddr;
+ if (xe_gt_WARN_ON(lrc->gt, max_len < 12))
+ return -ENOSPC;
*cmd++ = MI_STORE_REGISTER_MEM | MI_SRM_USE_GGTT | MI_SRM_ADD_CS_OFFSET;
*cmd++ = ENGINE_ID(0).addr;
@@ -964,11 +965,71 @@ static void xe_lrc_setup_utilization(struct xe_lrc *lrc)
*cmd++ = upper_32_bits(CONTEXT_ACTIVE);
}
+ return cmd - batch;
+}
+
+struct wa_bb_setup {
+ ssize_t (*setup)(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
+ u32 *batch, size_t max_size);
+};
+
+static size_t wa_bb_offset(struct xe_lrc *lrc)
+{
+ return lrc->bo->size - LRC_WA_BB_SIZE;
+}
+
+static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe)
+{
+ const size_t max_size = LRC_WA_BB_SIZE;
+ static const struct wa_bb_setup funcs[] = {
+ { .setup = wa_bb_setup_utilization },
+ };
+ ssize_t remain;
+ u32 *cmd, *buf = NULL;
+
+ if (lrc->bo->vmap.is_iomem) {
+ buf = kmalloc(max_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ cmd = buf;
+ } else {
+ cmd = lrc->bo->vmap.vaddr + wa_bb_offset(lrc);
+ }
+
+ remain = max_size / sizeof(*cmd);
+
+ for (size_t i = 0; i < ARRAY_SIZE(funcs); i++) {
+ ssize_t len = funcs[i].setup(lrc, hwe, cmd, remain);
+
+ remain -= len;
+
+ /*
+ * There should always be at least 1 additional dword for
+ * the end marker
+ */
+ if (len < 0 || xe_gt_WARN_ON(lrc->gt, remain < 1))
+ goto fail;
+
+ cmd += len;
+ }
+
*cmd++ = MI_BATCH_BUFFER_END;
- xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR,
- xe_bo_ggtt_addr(lrc->bb_per_ctx_bo) | 1);
+ if (buf) {
+ xe_map_memcpy_to(gt_to_xe(lrc->gt), &lrc->bo->vmap,
+ wa_bb_offset(lrc), buf,
+ (cmd - buf) * sizeof(*cmd));
+ kfree(buf);
+ }
+
+ xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR, xe_bo_ggtt_addr(lrc->bo) +
+ wa_bb_offset(lrc) + 1);
+
+ return 0;
+fail:
+ kfree(buf);
+ return -ENOSPC;
}
#define PVC_CTX_ASID (0x2e + 1)
@@ -1004,20 +1065,13 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
* FIXME: Perma-pinning LRC as we don't yet support moving GGTT address
* via VM bind calls.
*/
- lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, lrc_size,
+ lrc->bo = xe_bo_create_pin_map(xe, tile, NULL,
+ lrc_size + LRC_WA_BB_SIZE,
ttm_bo_type_kernel,
bo_flags);
if (IS_ERR(lrc->bo))
return PTR_ERR(lrc->bo);
- lrc->bb_per_ctx_bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K,
- ttm_bo_type_kernel,
- bo_flags);
- if (IS_ERR(lrc->bb_per_ctx_bo)) {
- err = PTR_ERR(lrc->bb_per_ctx_bo);
- goto err_lrc_finish;
- }
-
lrc->size = lrc_size;
lrc->ring.size = ring_size;
lrc->ring.tail = 0;
@@ -1125,7 +1179,9 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
map = __xe_lrc_start_seqno_map(lrc);
xe_map_write32(lrc_to_xe(lrc), &map, lrc->fence_ctx.next_seqno - 1);
- xe_lrc_setup_utilization(lrc);
+ err = setup_wa_bb(lrc, hwe);
+ if (err)
+ goto err_lrc_finish;
return 0;
@@ -1803,7 +1859,8 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
snapshot->seqno = xe_lrc_seqno(lrc);
snapshot->lrc_bo = xe_bo_get(lrc->bo);
snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc);
- snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset;
+ snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset -
+ LRC_WA_BB_SIZE;
snapshot->lrc_snapshot = NULL;
snapshot->ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(lrc));
snapshot->ctx_job_timestamp = xe_lrc_ctx_job_timestamp(lrc);
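
With the xe_lrc.c change above, the per-context workaround batch buffer no longer lives in a separate BO: the LRC BO is allocated LRC_WA_BB_SIZE bytes larger and the WA BB occupies its tail, so wa_bb_offset() is simply bo->size - LRC_WA_BB_SIZE, and CTX_BB_PER_CTX_PTR gets the GGTT address of that tail with bit 0 set (the patch adds 1, equivalent to the OR the old code used on a 4K-aligned address). A minimal sketch of the address computation, with a made-up constant:

#define EXAMPLE_WA_BB_SIZE	4096	/* stands in for LRC_WA_BB_SIZE (SZ_4K) */

static unsigned long example_wa_bb_ggtt_addr(unsigned long bo_ggtt_addr,
					     unsigned long bo_size)
{
	unsigned long offset = bo_size - EXAMPLE_WA_BB_SIZE;

	return bo_ggtt_addr + offset + 1;	/* bit 0 set, as in the patch */
}
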
diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h
index ae24cf6f8dd9..883e550a9423 100644
--- a/drivers/gpu/drm/xe/xe_lrc_types.h
+++ b/drivers/gpu/drm/xe/xe_lrc_types.h
@@ -53,9 +53,6 @@ struct xe_lrc {
/** @ctx_timestamp: readout value of CTX_TIMESTAMP on last update */
u64 ctx_timestamp;
-
- /** @bb_per_ctx_bo: buffer object for per context batch wa buffer */
- struct xe_bo *bb_per_ctx_bo;
};
struct xe_lrc_snapshot;
diff --git a/drivers/gpu/drm/xe/xe_map.h b/drivers/gpu/drm/xe/xe_map.h
index f62e0c8b67ab..8d67f6ba2d95 100644
--- a/drivers/gpu/drm/xe/xe_map.h
+++ b/drivers/gpu/drm/xe/xe_map.h
@@ -78,6 +78,24 @@ static inline void xe_map_write32(struct xe_device *xe, struct iosys_map *map,
iosys_map_wr(map__, offset__, type__, val__); \
})
+#define xe_map_rd_array(xe__, map__, index__, type__) \
+ xe_map_rd(xe__, map__, (index__) * sizeof(type__), type__)
+
+#define xe_map_wr_array(xe__, map__, index__, type__, val__) \
+ xe_map_wr(xe__, map__, (index__) * sizeof(type__), type__, val__)
+
+#define xe_map_rd_array_u32(xe__, map__, index__) \
+ xe_map_rd_array(xe__, map__, index__, u32)
+
+#define xe_map_wr_array_u32(xe__, map__, index__, val__) \
+ xe_map_wr_array(xe__, map__, index__, u32, val__)
+
+#define xe_map_rd_ring_u32(xe__, map__, index__, size__) \
+ xe_map_rd_array_u32(xe__, map__, (index__) % (size__))
+
+#define xe_map_wr_ring_u32(xe__, map__, index__, size__, val__) \
+ xe_map_wr_array_u32(xe__, map__, (index__) % (size__), val__)
+
#define xe_map_rd_field(xe__, map__, struct_offset__, struct_type__, field__) ({ \
struct xe_device *__xe = xe__; \
xe_device_assert_mem_access(__xe); \
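
The ring variants added to xe_map.h above simply wrap the index modulo the buffer size (in dwords) before delegating to the array accessors, which is what lets the CTB fixup code walk a circular command buffer without open-coding the wrap. A small usage sketch; the device, map and size arguments are placeholders:

static u32 example_read_wrapped(struct xe_device *xe, struct iosys_map *map,
				u32 index, u32 size_dw)
{
	/* Equivalent to xe_map_rd_array_u32(xe, map, index % size_dw). */
	return xe_map_rd_ring_u32(xe, map, index, size_dw);
}
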
diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c
index e4742e27e2cd..e332f3142435 100644
--- a/drivers/gpu/drm/xe/xe_module.c
+++ b/drivers/gpu/drm/xe/xe_module.c
@@ -18,9 +18,15 @@
#include "xe_observation.h"
#include "xe_sched_job.h"
+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define DEFAULT_GUC_LOG_LEVEL 3
+#else
+#define DEFAULT_GUC_LOG_LEVEL 1
+#endif
+
struct xe_modparam xe_modparam = {
.probe_display = true,
- .guc_log_level = 3,
+ .guc_log_level = DEFAULT_GUC_LOG_LEVEL,
.force_probe = CONFIG_DRM_XE_FORCE_PROBE,
.wedged_mode = 1,
.svm_notifier_size = 512,
@@ -40,7 +46,8 @@ module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, int, 0600);
MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size (in MiB) - <0=disable-resize, 0=max-needed-size[default], >0=force-size");
module_param_named(guc_log_level, xe_modparam.guc_log_level, int, 0600);
-MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1..5=enable with verbosity min..max)");
+MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1=normal, 2..5=verbose-levels "
+ "[default=" __stringify(DEFAULT_GUC_LOG_LEVEL) "])");
module_param_named_unsafe(guc_firmware_path, xe_modparam.guc_firmware_path, charp, 0400);
MODULE_PARM_DESC(guc_firmware_path,
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index fb842fa0552e..4829ed46a8b4 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -43,6 +43,12 @@
#define DEFAULT_POLL_PERIOD_NS (NSEC_PER_SEC / DEFAULT_POLL_FREQUENCY_HZ)
#define XE_OA_UNIT_INVALID U32_MAX
+enum xe_oam_unit_type {
+ XE_OAM_UNIT_SAG,
+ XE_OAM_UNIT_SCMI_0,
+ XE_OAM_UNIT_SCMI_1,
+};
+
enum xe_oa_submit_deps {
XE_OA_SUBMIT_NO_DEPS,
XE_OA_SUBMIT_ADD_DEPS,
@@ -77,7 +83,7 @@ struct xe_oa_config {
struct xe_oa_open_param {
struct xe_file *xef;
- u32 oa_unit_id;
+ struct xe_oa_unit *oa_unit;
bool sample;
u32 metric_set;
enum xe_oa_format_name oa_format;
@@ -194,7 +200,7 @@ static void free_oa_config_bo(struct xe_oa_config_bo *oa_bo, struct dma_fence *l
static const struct xe_oa_regs *__oa_regs(struct xe_oa_stream *stream)
{
- return &stream->hwe->oa_unit->regs;
+ return &stream->oa_unit->regs;
}
static u32 xe_oa_hw_tail_read(struct xe_oa_stream *stream)
@@ -454,7 +460,7 @@ static u32 __oa_ccs_select(struct xe_oa_stream *stream)
static u32 __oactrl_used_bits(struct xe_oa_stream *stream)
{
- return stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
+ return stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
OAG_OACONTROL_USED_BITS : OAM_OACONTROL_USED_BITS;
}
@@ -475,7 +481,7 @@ static void xe_oa_enable(struct xe_oa_stream *stream)
__oa_ccs_select(stream) | OAG_OACONTROL_OA_COUNTER_ENABLE;
if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
- stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
val |= OAG_OACONTROL_OA_PES_DISAG_EN;
xe_mmio_rmw32(&stream->gt->mmio, regs->oa_ctrl, __oactrl_used_bits(stream), val);
@@ -838,11 +844,16 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
/* Reset PMON Enable to save power. */
xe_mmio_rmw32(mmio, XELPMP_SQCNT1, sqcnt1, 0);
+
+ if ((stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM ||
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM_SAG) &&
+ GRAPHICS_VER(stream->oa->xe) >= 30)
+ xe_mmio_rmw32(mmio, OAM_COMPRESSION_T3_CONTROL, OAM_LAT_MEASURE_ENABLE, 0);
}
static void xe_oa_stream_destroy(struct xe_oa_stream *stream)
{
- struct xe_oa_unit *u = stream->hwe->oa_unit;
+ struct xe_oa_unit *u = stream->oa_unit;
struct xe_gt *gt = stream->hwe->gt;
if (WARN_ON(stream != u->exclusive_stream))
@@ -1105,9 +1116,13 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
*/
sqcnt1 = SQCNT1_PMON_ENABLE |
(HAS_OA_BPC_REPORTING(stream->oa->xe) ? SQCNT1_OABPC : 0);
-
xe_mmio_rmw32(mmio, XELPMP_SQCNT1, 0, sqcnt1);
+ if ((stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM ||
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAM_SAG) &&
+ GRAPHICS_VER(stream->oa->xe) >= 30)
+ xe_mmio_rmw32(mmio, OAM_COMPRESSION_T3_CONTROL, 0, OAM_LAT_MEASURE_ENABLE);
+
/* Configure OAR/OAC */
if (stream->exec_q) {
ret = xe_oa_configure_oa_context(stream, true);
@@ -1139,14 +1154,31 @@ static int decode_oa_format(struct xe_oa *oa, u64 fmt, enum xe_oa_format_name *n
return -EINVAL;
}
+static struct xe_oa_unit *xe_oa_lookup_oa_unit(struct xe_oa *oa, u32 oa_unit_id)
+{
+ struct xe_gt *gt;
+ int gt_id, i;
+
+ for_each_gt(gt, oa->xe, gt_id) {
+ for (i = 0; i < gt->oa.num_oa_units; i++) {
+ struct xe_oa_unit *u = &gt->oa.oa_unit[i];
+
+ if (u->oa_unit_id == oa_unit_id)
+ return u;
+ }
+ }
+
+ return NULL;
+}
+
static int xe_oa_set_prop_oa_unit_id(struct xe_oa *oa, u64 value,
struct xe_oa_open_param *param)
{
- if (value >= oa->oa_unit_ids) {
+ param->oa_unit = xe_oa_lookup_oa_unit(oa, value);
+ if (!param->oa_unit) {
drm_dbg(&oa->xe->drm, "OA unit ID out of range %lld\n", value);
return -EINVAL;
}
- param->oa_unit_id = value;
return 0;
}
@@ -1677,13 +1709,13 @@ static const struct file_operations xe_oa_fops = {
static int xe_oa_stream_init(struct xe_oa_stream *stream,
struct xe_oa_open_param *param)
{
- struct xe_oa_unit *u = param->hwe->oa_unit;
struct xe_gt *gt = param->hwe->gt;
unsigned int fw_ref;
int ret;
stream->exec_q = param->exec_q;
stream->poll_period_ns = DEFAULT_POLL_PERIOD_NS;
+ stream->oa_unit = param->oa_unit;
stream->hwe = param->hwe;
stream->gt = stream->hwe->gt;
stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];
@@ -1704,7 +1736,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
* buffer whose size, circ_size, is a multiple of the report size
*/
if (GRAPHICS_VER(stream->oa->xe) >= 20 &&
- stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample)
+ stream->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG && stream->sample)
stream->oa_buffer.circ_size =
param->oa_buffer_size -
param->oa_buffer_size % stream->oa_buffer.format->size;
@@ -1762,7 +1794,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
drm_dbg(&stream->oa->xe->drm, "opening stream oa config uuid=%s\n",
stream->oa_config->uuid);
- WRITE_ONCE(u->exclusive_stream, stream);
+ WRITE_ONCE(stream->oa_unit->exclusive_stream, stream);
hrtimer_setup(&stream->poll_check_timer, xe_oa_poll_check_timer_cb, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
@@ -1798,7 +1830,7 @@ static int xe_oa_stream_open_ioctl_locked(struct xe_oa *oa,
int ret;
/* We currently only allow exclusive access */
- if (param->hwe->oa_unit->exclusive_stream) {
+ if (param->oa_unit->exclusive_stream) {
drm_dbg(&oa->xe->drm, "OA unit already in use\n");
ret = -EBUSY;
goto exit;
@@ -1874,13 +1906,14 @@ static u64 oa_exponent_to_ns(struct xe_gt *gt, int exponent)
return div_u64(nom + den - 1, den);
}
-static bool engine_supports_oa_format(const struct xe_hw_engine *hwe, int type)
+static bool oa_unit_supports_oa_format(struct xe_oa_open_param *param, int type)
{
- switch (hwe->oa_unit->type) {
+ switch (param->oa_unit->type) {
case DRM_XE_OA_UNIT_TYPE_OAG:
return type == DRM_XE_OA_FMT_TYPE_OAG || type == DRM_XE_OA_FMT_TYPE_OAR ||
type == DRM_XE_OA_FMT_TYPE_OAC || type == DRM_XE_OA_FMT_TYPE_PEC;
case DRM_XE_OA_UNIT_TYPE_OAM:
+ case DRM_XE_OA_UNIT_TYPE_OAM_SAG:
return type == DRM_XE_OA_FMT_TYPE_OAM || type == DRM_XE_OA_FMT_TYPE_OAM_MPEC;
default:
return false;
@@ -1899,37 +1932,48 @@ u16 xe_oa_unit_id(struct xe_hw_engine *hwe)
hwe->oa_unit->oa_unit_id : U16_MAX;
}
+/* A hwe must be assigned to stream/oa_unit for batch submissions */
static int xe_oa_assign_hwe(struct xe_oa *oa, struct xe_oa_open_param *param)
{
- struct xe_gt *gt;
- int i, ret = 0;
+ struct xe_hw_engine *hwe;
+ enum xe_hw_engine_id id;
+ int ret = 0;
+
+ /* If not provided, OA unit defaults to OA unit 0 as per uapi */
+ if (!param->oa_unit)
+ param->oa_unit = &xe_device_get_gt(oa->xe, 0)->oa.oa_unit[0];
+ /* When we have an exec_q, get hwe from the exec_q */
if (param->exec_q) {
- /* When we have an exec_q, get hwe from the exec_q */
param->hwe = xe_gt_hw_engine(param->exec_q->gt, param->exec_q->class,
param->engine_instance, true);
- } else {
- struct xe_hw_engine *hwe;
- enum xe_hw_engine_id id;
-
- /* Else just get the first hwe attached to the oa unit */
- for_each_gt(gt, oa->xe, i) {
- for_each_hw_engine(hwe, gt, id) {
- if (xe_oa_unit_id(hwe) == param->oa_unit_id) {
- param->hwe = hwe;
- goto out;
- }
- }
- }
+ if (!param->hwe || param->hwe->oa_unit != param->oa_unit)
+ goto err;
+ goto out;
}
-out:
- if (!param->hwe || xe_oa_unit_id(param->hwe) != param->oa_unit_id) {
- drm_dbg(&oa->xe->drm, "Unable to find hwe (%d, %d) for OA unit ID %d\n",
- param->exec_q ? param->exec_q->class : -1,
- param->engine_instance, param->oa_unit_id);
- ret = -EINVAL;
+
+ /* Else just get the first hwe attached to the oa unit */
+ for_each_hw_engine(hwe, param->oa_unit->gt, id) {
+ if (hwe->oa_unit == param->oa_unit) {
+ param->hwe = hwe;
+ goto out;
+ }
}
+ /* If we still didn't find a hwe, just get one with a valid oa_unit from the same gt */
+ for_each_hw_engine(hwe, param->oa_unit->gt, id) {
+ if (!hwe->oa_unit)
+ continue;
+
+ param->hwe = hwe;
+ goto out;
+ }
+err:
+ drm_dbg(&oa->xe->drm, "Unable to find hwe (%d, %d) for OA unit ID %d\n",
+ param->exec_q ? param->exec_q->class : -1,
+ param->engine_instance, param->oa_unit->oa_unit_id);
+ ret = -EINVAL;
+out:
return ret;
}
@@ -2007,7 +2051,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
f = &oa->oa_formats[param.oa_format];
if (!param.oa_format || !f->size ||
- !engine_supports_oa_format(param.hwe, f->type)) {
+ !oa_unit_supports_oa_format(&param, f->type)) {
drm_dbg(&oa->xe->drm, "Invalid OA format %d type %d size %d for class %d\n",
param.oa_format, f->type, f->size, param.hwe->class);
ret = -EINVAL;
@@ -2155,6 +2199,7 @@ static const struct xe_mmio_range gen12_oa_mux_regs[] = {
static const struct xe_mmio_range xe2_oa_mux_regs[] = {
{ .start = 0x5194, .end = 0x5194 }, /* SYS_MEM_LAT_MEASURE_MERTF_GRP_3D */
{ .start = 0x8704, .end = 0x8704 }, /* LMEM_LAT_MEASURE_MCFG_GRP */
+ { .start = 0xB01C, .end = 0xB01C }, /* LNCF_MISC_CONFIG_REGISTER0 */
{ .start = 0xB1BC, .end = 0xB1BC }, /* L3_BANK_LAT_MEASURE_LBCF_GFX */
{ .start = 0xD0E0, .end = 0xD0F4 }, /* VISACTL */
{ .start = 0xE18C, .end = 0xE18C }, /* SAMPLER_MODE */
@@ -2448,20 +2493,38 @@ int xe_oa_register(struct xe_device *xe)
static u32 num_oa_units_per_gt(struct xe_gt *gt)
{
- return 1;
+ if (!xe_gt_is_media_type(gt) || GRAPHICS_VER(gt_to_xe(gt)) < 20)
+ return 1;
+ else if (!IS_DGFX(gt_to_xe(gt)))
+ return XE_OAM_UNIT_SCMI_0 + 1; /* SAG + SCMI_0 */
+ else
+ return XE_OAM_UNIT_SCMI_1 + 1; /* SAG + SCMI_0 + SCMI_1 */
}
static u32 __hwe_oam_unit(struct xe_hw_engine *hwe)
{
- if (GRAPHICS_VERx100(gt_to_xe(hwe->gt)) >= 1270) {
- /*
- * There's 1 SAMEDIA gt and 1 OAM per SAMEDIA gt. All media slices
- * within the gt use the same OAM. All MTL/LNL SKUs list 1 SA MEDIA
- */
- xe_gt_WARN_ON(hwe->gt, hwe->gt->info.type != XE_GT_TYPE_MEDIA);
+ if (GRAPHICS_VERx100(gt_to_xe(hwe->gt)) < 1270)
+ return XE_OA_UNIT_INVALID;
+
+ xe_gt_WARN_ON(hwe->gt, !xe_gt_is_media_type(hwe->gt));
+ if (GRAPHICS_VER(gt_to_xe(hwe->gt)) < 20)
return 0;
- }
+ /*
+ * XE_OAM_UNIT_SAG has only GSCCS attached to it, but only on some platforms. Also
+ * GSCCS cannot be used to submit batches to program the OAM unit. Therefore we don't
+ * assign an OA unit to GSCCS. This means that XE_OAM_UNIT_SAG is exposed as an OA
+ * unit without attached engines. Fused off engines can also result in oa_units with
+ * num_engines == 0. OA streams can be opened on all OA units.
+ */
+ else if (hwe->engine_id == XE_HW_ENGINE_GSCCS0)
+ return XE_OA_UNIT_INVALID;
+ else if (!IS_DGFX(gt_to_xe(hwe->gt)))
+ return XE_OAM_UNIT_SCMI_0;
+ else if (hwe->class == XE_ENGINE_CLASS_VIDEO_DECODE)
+ return (hwe->instance / 2 & 0x1) + 1;
+ else if (hwe->class == XE_ENGINE_CLASS_VIDEO_ENHANCE)
+ return (hwe->instance & 0x1) + 1;
return XE_OA_UNIT_INVALID;
}
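As a worked illustration of the instance arithmetic above, on a hypothetical discrete Xe2+ media GT (engine instances assumed purely for illustration):

/*
 *   VCS0, VCS1 -> (0 / 2 & 0x1) + 1 == 1 -> XE_OAM_UNIT_SCMI_0
 *   VCS2, VCS3 -> (2 / 2 & 0x1) + 1 == 2 -> XE_OAM_UNIT_SCMI_1
 *   VECS0      -> (0 & 0x1) + 1      == 1 -> XE_OAM_UNIT_SCMI_0
 *   VECS1      -> (1 & 0x1) + 1      == 2 -> XE_OAM_UNIT_SCMI_1
 */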
@@ -2475,6 +2538,7 @@ static u32 __hwe_oa_unit(struct xe_hw_engine *hwe)
case XE_ENGINE_CLASS_VIDEO_DECODE:
case XE_ENGINE_CLASS_VIDEO_ENHANCE:
+ case XE_ENGINE_CLASS_OTHER:
return __hwe_oam_unit(hwe);
default:
@@ -2514,20 +2578,29 @@ static struct xe_oa_regs __oag_regs(void)
static void __xe_oa_init_oa_units(struct xe_gt *gt)
{
- const u32 mtl_oa_base[] = { 0x13000 };
+ /* Actual address is MEDIA_GT_GSI_OFFSET + oam_base_addr[i] */
+ const u32 oam_base_addr[] = {
+ [XE_OAM_UNIT_SAG] = 0x13000,
+ [XE_OAM_UNIT_SCMI_0] = 0x14000,
+ [XE_OAM_UNIT_SCMI_1] = 0x14800,
+ };
int i, num_units = gt->oa.num_oa_units;
for (i = 0; i < num_units; i++) {
struct xe_oa_unit *u = &gt->oa.oa_unit[i];
- if (gt->info.type != XE_GT_TYPE_MEDIA) {
+ if (!xe_gt_is_media_type(gt)) {
u->regs = __oag_regs();
u->type = DRM_XE_OA_UNIT_TYPE_OAG;
- } else if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
- u->regs = __oam_regs(mtl_oa_base[i]);
- u->type = DRM_XE_OA_UNIT_TYPE_OAM;
+ } else {
+ xe_gt_assert(gt, GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270);
+ u->regs = __oam_regs(oam_base_addr[i]);
+ u->type = i == XE_OAM_UNIT_SAG && GRAPHICS_VER(gt_to_xe(gt)) >= 20 ?
+ DRM_XE_OA_UNIT_TYPE_OAM_SAG : DRM_XE_OA_UNIT_TYPE_OAM;
}
+ u->gt = gt;
+
xe_mmio_write32(&gt->mmio, u->regs.oa_ctrl, 0);
/* Ensure MMIO trigger remains disabled till there is a stream */
@@ -2560,10 +2633,6 @@ static int xe_oa_init_gt(struct xe_gt *gt)
}
}
- /*
- * Fused off engines can result in oa_unit's with num_engines == 0. These units
- * will appear in OA unit query, but no OA streams can be opened on them.
- */
gt->oa.num_oa_units = num_oa_units;
gt->oa.oa_unit = u;
@@ -2574,17 +2643,54 @@ static int xe_oa_init_gt(struct xe_gt *gt)
return 0;
}
+static void xe_oa_print_gt_oa_units(struct xe_gt *gt)
+{
+ enum xe_hw_engine_id hwe_id;
+ struct xe_hw_engine *hwe;
+ struct xe_oa_unit *u;
+ char buf[256];
+ int i, n;
+
+ for (i = 0; i < gt->oa.num_oa_units; i++) {
+ u = &gt->oa.oa_unit[i];
+ buf[0] = '\0';
+ n = 0;
+
+ for_each_hw_engine(hwe, gt, hwe_id)
+ if (xe_oa_unit_id(hwe) == u->oa_unit_id)
+ n += scnprintf(buf + n, sizeof(buf) - n, "%s ", hwe->name);
+
+ xe_gt_dbg(gt, "oa_unit %d, type %d, Engines: %s\n", u->oa_unit_id, u->type, buf);
+ }
+}
+
+static void xe_oa_print_oa_units(struct xe_oa *oa)
+{
+ struct xe_gt *gt;
+ int gt_id;
+
+ for_each_gt(gt, oa->xe, gt_id)
+ xe_oa_print_gt_oa_units(gt);
+}
+
static int xe_oa_init_oa_units(struct xe_oa *oa)
{
struct xe_gt *gt;
int i, ret;
+ /* Needed for OAM implementation here */
+ BUILD_BUG_ON(XE_OAM_UNIT_SAG != 0);
+ BUILD_BUG_ON(XE_OAM_UNIT_SCMI_0 != 1);
+ BUILD_BUG_ON(XE_OAM_UNIT_SCMI_1 != 2);
+
for_each_gt(gt, oa->xe, i) {
ret = xe_oa_init_gt(gt);
if (ret)
return ret;
}
+ xe_oa_print_oa_units(oa);
+
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
index 52e33c37d5ee..2628f78c4e8d 100644
--- a/drivers/gpu/drm/xe/xe_oa_types.h
+++ b/drivers/gpu/drm/xe/xe_oa_types.h
@@ -95,6 +95,9 @@ struct xe_oa_unit {
/** @oa_unit_id: identifier for the OA unit */
u16 oa_unit_id;
+ /** @gt: gt associated with the OA unit */
+ struct xe_gt *gt;
+
/** @type: Type of OA unit - OAM, OAG etc. */
enum drm_xe_oa_unit_type type;
@@ -182,6 +185,9 @@ struct xe_oa_stream {
/** @gt: gt associated with the oa stream */
struct xe_gt *gt;
+ /** @oa_unit: oa unit for this stream */
+ struct xe_oa_unit *oa_unit;
+
/** @hwe: hardware engine associated with this oa stream */
struct xe_hw_engine *hwe;
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index 30fdbdb9341e..2e7cb99ae87a 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -103,7 +103,8 @@ static const struct xe_pat_table_entry xelpg_pat_table[] = {
*
* Note: There is an implicit assumption in the driver that compression and
* coh_1way+ are mutually exclusive. If this is ever not true then userptr
- * and imported dma-buf from external device will have uncleared ccs state.
+ * and imported dma-buf from external device will have uncleared ccs state. See
+ * also xe_bo_needs_ccs_pages().
*/
#define XE2_PAT(no_promote, comp_en, l3clos, l3_policy, l4_policy, __coh_mode) \
{ \
@@ -162,21 +163,35 @@ u16 xe_pat_index_get_coh_mode(struct xe_device *xe, u16 pat_index)
static void program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
int n_entries)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
for (int i = 0; i < n_entries; i++) {
struct xe_reg reg = XE_REG(_PAT_INDEX(i));
xe_mmio_write32(&gt->mmio, reg, table[i].value);
}
+
+ if (xe->pat.pat_ats)
+ xe_mmio_write32(&gt->mmio, XE_REG(_PAT_ATS), xe->pat.pat_ats->value);
+ if (xe->pat.pat_pta)
+ xe_mmio_write32(&gt->mmio, XE_REG(_PAT_PTA), xe->pat.pat_pta->value);
}
static void program_pat_mcr(struct xe_gt *gt, const struct xe_pat_table_entry table[],
int n_entries)
{
+ struct xe_device *xe = gt_to_xe(gt);
+
for (int i = 0; i < n_entries; i++) {
struct xe_reg_mcr reg_mcr = XE_REG_MCR(_PAT_INDEX(i));
xe_gt_mcr_multicast_write(gt, reg_mcr, table[i].value);
}
+
+ if (xe->pat.pat_ats)
+ xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe->pat.pat_ats->value);
+ if (xe->pat.pat_pta)
+ xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe->pat.pat_pta->value);
}
static void xelp_dump(struct xe_gt *gt, struct drm_printer *p)
@@ -303,26 +318,6 @@ static const struct xe_pat_ops xelpg_pat_ops = {
.dump = xelpg_dump,
};
-static void xe2lpg_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
- int n_entries)
-{
- program_pat_mcr(gt, table, n_entries);
- xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_ATS), xe2_pat_ats.value);
-
- if (IS_DGFX(gt_to_xe(gt)))
- xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_PTA), xe2_pat_pta.value);
-}
-
-static void xe2lpm_program_pat(struct xe_gt *gt, const struct xe_pat_table_entry table[],
- int n_entries)
-{
- program_pat(gt, table, n_entries);
- xe_mmio_write32(&gt->mmio, XE_REG(_PAT_ATS), xe2_pat_ats.value);
-
- if (IS_DGFX(gt_to_xe(gt)))
- xe_mmio_write32(&gt->mmio, XE_REG(_PAT_PTA), xe2_pat_pta.value);
-}
-
static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
{
struct xe_device *xe = gt_to_xe(gt);
@@ -375,8 +370,8 @@ static void xe2_dump(struct xe_gt *gt, struct drm_printer *p)
}
static const struct xe_pat_ops xe2_pat_ops = {
- .program_graphics = xe2lpg_program_pat,
- .program_media = xe2lpm_program_pat,
+ .program_graphics = program_pat_mcr,
+ .program_media = program_pat,
.dump = xe2_dump,
};
@@ -385,6 +380,9 @@ void xe_pat_init_early(struct xe_device *xe)
if (GRAPHICS_VER(xe) == 30 || GRAPHICS_VER(xe) == 20) {
xe->pat.ops = &xe2_pat_ops;
xe->pat.table = xe2_pat_table;
+ xe->pat.pat_ats = &xe2_pat_ats;
+ if (IS_DGFX(xe))
+ xe->pat.pat_pta = &xe2_pat_pta;
/* Wa_16023588340. XXX: Should use XE_WA */
if (GRAPHICS_VERx100(xe) == 2001)
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index ac4beaed58ff..89814b32e585 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -180,6 +180,7 @@ static const struct xe_ip graphics_ips[] = {
{ 1271, "Xe_LPG", &graphics_xelpg },
{ 1274, "Xe_LPG+", &graphics_xelpg },
{ 2001, "Xe2_HPG", &graphics_xe2 },
+ { 2002, "Xe2_HPG", &graphics_xe2 },
{ 2004, "Xe2_LPG", &graphics_xe2 },
{ 3000, "Xe3_LPG", &graphics_xe2 },
{ 3001, "Xe3_LPG", &graphics_xe2 },
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index b04756a97cdc..c8e63bd23300 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -907,6 +907,11 @@ bool xe_pt_zap_ptes(struct xe_tile *tile, struct xe_vma *vma)
struct xe_pt *pt = xe_vma_vm(vma)->pt_root[tile->id];
u8 pt_mask = (vma->tile_present & ~vma->tile_invalidated);
+ if (xe_vma_bo(vma))
+ xe_bo_assert_held(xe_vma_bo(vma));
+ else if (xe_vma_is_userptr(vma))
+ lockdep_assert_held(&xe_vma_vm(vma)->userptr.notifier_lock);
+
if (!(pt_mask & BIT(tile->id)))
return false;
@@ -1458,6 +1463,7 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
struct xe_vm *vm = pt_update->vops->vm;
struct xe_vma_ops *vops = pt_update->vops;
struct xe_vma_op *op;
+ unsigned long i;
int err;
err = xe_pt_pre_commit(pt_update);
@@ -1467,20 +1473,35 @@ static int xe_pt_svm_pre_commit(struct xe_migrate_pt_update *pt_update)
xe_svm_notifier_lock(vm);
list_for_each_entry(op, &vops->list, link) {
- struct xe_svm_range *range = op->map_range.range;
+ struct xe_svm_range *range = NULL;
if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
continue;
- xe_svm_range_debug(range, "PRE-COMMIT");
+ if (op->base.op == DRM_GPUVA_OP_PREFETCH) {
+ xe_assert(vm->xe,
+ xe_vma_is_cpu_addr_mirror(gpuva_to_vma(op->base.prefetch.va)));
+ xa_for_each(&op->prefetch_range.range, i, range) {
+ xe_svm_range_debug(range, "PRE-COMMIT");
- xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
- xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
+ if (!xe_svm_range_pages_valid(range)) {
+ xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+ xe_svm_notifier_unlock(vm);
+ return -ENODATA;
+ }
+ }
+ } else {
+ xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(op->map_range.vma));
+ xe_assert(vm->xe, op->subop == XE_VMA_SUBOP_MAP_RANGE);
+ range = op->map_range.range;
- if (!xe_svm_range_pages_valid(range)) {
- xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
- xe_svm_notifier_unlock(vm);
- return -EAGAIN;
+ xe_svm_range_debug(range, "PRE-COMMIT");
+
+ if (!xe_svm_range_pages_valid(range)) {
+ xe_svm_range_debug(range, "PRE-COMMIT - RETRY");
+ xe_svm_notifier_unlock(vm);
+ return -EAGAIN;
+ }
}
}
@@ -1974,6 +1995,32 @@ static int unbind_op_prepare(struct xe_tile *tile,
return 0;
}
+static bool
+xe_pt_op_check_range_skip_invalidation(struct xe_vm_pgtable_update_op *pt_op,
+ struct xe_svm_range *range)
+{
+ struct xe_vm_pgtable_update *update = pt_op->entries;
+
+ XE_WARN_ON(!pt_op->num_entries);
+
+ /*
+ * We can't skip the invalidation if we are removing PTEs that span more
+ * than the range; do some checks to ensure we are removing PTEs that
+ * are invalid.
+ */
+
+ if (pt_op->num_entries > 1)
+ return false;
+
+ if (update->pt->level == 0)
+ return true;
+
+ if (update->pt->level == 1)
+ return xe_svm_range_size(range) >= SZ_2M;
+
+ return false;
+}
+
static int unbind_range_prepare(struct xe_vm *vm,
struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -2002,7 +2049,10 @@ static int unbind_range_prepare(struct xe_vm *vm,
range->base.itree.last + 1);
++pt_update_ops->current_op;
pt_update_ops->needs_svm_lock = true;
- pt_update_ops->needs_invalidation = true;
+ pt_update_ops->needs_invalidation |= xe_vm_has_scratch(vm) ||
+ xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
+ range->tile_invalidated) ||
+ !xe_pt_op_check_range_skip_invalidation(pt_op, range);
xe_pt_commit_prepare_unbind(XE_INVALID_VMA, pt_op->entries,
pt_op->num_entries);
@@ -2065,11 +2115,20 @@ static int op_prepare(struct xe_vm *vm,
{
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
- if (xe_vma_is_cpu_addr_mirror(vma))
- break;
+ if (xe_vma_is_cpu_addr_mirror(vma)) {
+ struct xe_svm_range *range;
+ unsigned long i;
- err = bind_op_prepare(vm, tile, pt_update_ops, vma, false);
- pt_update_ops->wait_vm_kernel = true;
+ xa_for_each(&op->prefetch_range.range, i, range) {
+ err = bind_range_prepare(vm, tile, pt_update_ops,
+ vma, range);
+ if (err)
+ return err;
+ }
+ } else {
+ err = bind_op_prepare(vm, tile, pt_update_ops, vma, false);
+ pt_update_ops->wait_vm_kernel = true;
+ }
break;
}
case DRM_GPUVA_OP_DRIVER:
@@ -2166,10 +2225,15 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
}
- vma->tile_present |= BIT(tile->id);
- vma->tile_staged &= ~BIT(tile->id);
+ /* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
+ WRITE_ONCE(vma->tile_present, vma->tile_present | BIT(tile->id));
if (invalidate_on_bind)
- vma->tile_invalidated |= BIT(tile->id);
+ WRITE_ONCE(vma->tile_invalidated,
+ vma->tile_invalidated | BIT(tile->id));
+ else
+ WRITE_ONCE(vma->tile_invalidated,
+ vma->tile_invalidated & ~BIT(tile->id));
+ vma->tile_staged &= ~BIT(tile->id);
if (xe_vma_is_userptr(vma)) {
lockdep_assert_held_read(&vm->userptr.notifier_lock);
to_userptr_vma(vma)->userptr.initial_bind = true;
@@ -2216,6 +2280,18 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
}
}
+static void range_present_and_invalidated_tile(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_id)
+{
+ /* All WRITE_ONCE pair with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
+
+ lockdep_assert_held(&vm->svm.gpusvm.notifier_lock);
+
+ WRITE_ONCE(range->tile_present, range->tile_present | BIT(tile_id));
+ WRITE_ONCE(range->tile_invalidated, range->tile_invalidated & ~BIT(tile_id));
+}
+
static void op_commit(struct xe_vm *vm,
struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
@@ -2263,27 +2339,28 @@ static void op_commit(struct xe_vm *vm,
{
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
- if (!xe_vma_is_cpu_addr_mirror(vma))
+ if (xe_vma_is_cpu_addr_mirror(vma)) {
+ struct xe_svm_range *range = NULL;
+ unsigned long i;
+
+ xa_for_each(&op->prefetch_range.range, i, range)
+ range_present_and_invalidated_tile(vm, range, tile->id);
+ } else {
bind_op_commit(vm, tile, pt_update_ops, vma, fence,
fence2, false);
+ }
break;
}
case DRM_GPUVA_OP_DRIVER:
{
- /* WRITE_ONCE pairs with READ_ONCE in xe_svm.c */
-
- if (op->subop == XE_VMA_SUBOP_MAP_RANGE) {
- WRITE_ONCE(op->map_range.range->tile_present,
- op->map_range.range->tile_present |
- BIT(tile->id));
- WRITE_ONCE(op->map_range.range->tile_invalidated,
- op->map_range.range->tile_invalidated &
- ~BIT(tile->id));
- } else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE) {
+ /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
+ if (op->subop == XE_VMA_SUBOP_MAP_RANGE)
+ range_present_and_invalidated_tile(vm, op->map_range.range, tile->id);
+ else if (op->subop == XE_VMA_SUBOP_UNMAP_RANGE)
WRITE_ONCE(op->unmap_range.range->tile_present,
op->unmap_range.range->tile_present &
~BIT(tile->id));
- }
+
break;
}
default:
@@ -2476,7 +2553,7 @@ free_ifence:
kfree(mfence);
kfree(ifence);
kill_vm_tile1:
- if (err != -EAGAIN && tile->id)
+ if (err != -EAGAIN && err != -ENODATA && tile->id)
xe_vm_kill(vops->vm, false);
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c
index b5bc15f436fa..3d62008c99f1 100644
--- a/drivers/gpu/drm/xe/xe_pxp.c
+++ b/drivers/gpu/drm/xe/xe_pxp.c
@@ -504,69 +504,62 @@ int xe_pxp_exec_queue_set_type(struct xe_pxp *pxp, struct xe_exec_queue *q, u8 t
return 0;
}
-static void __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+static int __exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
{
- spin_lock_irq(&pxp->queues.lock);
- list_add_tail(&q->pxp.link, &pxp->queues.list);
- spin_unlock_irq(&pxp->queues.lock);
+ int ret = 0;
+
+ /*
+ * A queue can be added to the list only if the PXP is in active status,
+ * otherwise the termination might not handle it correctly.
+ */
+ mutex_lock(&pxp->mutex);
+
+ if (pxp->status == XE_PXP_ACTIVE) {
+ spin_lock_irq(&pxp->queues.lock);
+ list_add_tail(&q->pxp.link, &pxp->queues.list);
+ spin_unlock_irq(&pxp->queues.lock);
+ } else if (pxp->status == XE_PXP_ERROR || pxp->status == XE_PXP_SUSPENDED) {
+ ret = -EIO;
+ } else {
+ ret = -EBUSY; /* try again later */
+ }
+
+ mutex_unlock(&pxp->mutex);
+
+ return ret;
}
-/**
- * xe_pxp_exec_queue_add - add a queue to the PXP list
- * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
- * @q: the queue to add to the list
- *
- * If PXP is enabled and the prerequisites are done, start the PXP ARB
- * session (if not already running) and add the queue to the PXP list. Note
- * that the queue must have previously been marked as using PXP with
- * xe_pxp_exec_queue_set_type.
- *
- * Returns 0 if the PXP ARB session is running and the queue is in the list,
- * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done,
- * other errno value if something goes wrong during the session start.
- */
-int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+static int pxp_start(struct xe_pxp *pxp, u8 type)
{
int ret = 0;
+ bool restart = false;
if (!xe_pxp_is_enabled(pxp))
return -ENODEV;
/* we only support HWDRM sessions right now */
- xe_assert(pxp->xe, q->pxp.type == DRM_XE_PXP_TYPE_HWDRM);
-
- /*
- * Runtime suspend kills PXP, so we take a reference to prevent it from
- * happening while we have active queues that use PXP
- */
- xe_pm_runtime_get(pxp->xe);
+ xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);
/* get_readiness_status() returns 0 for in-progress and 1 for done */
ret = xe_pxp_get_readiness_status(pxp);
- if (ret <= 0) {
- if (!ret)
- ret = -EBUSY;
- goto out;
- }
+ if (ret <= 0)
+ return ret ?: -EBUSY;
+
ret = 0;
wait_for_idle:
/*
* if there is an action in progress, wait for it. We need to wait
* outside the lock because the completion is done from within the lock.
- * Note that the two action should never be pending at the same time.
+ * Note that the two actions should never be pending at the same time.
*/
if (!wait_for_completion_timeout(&pxp->termination,
- msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS))) {
- ret = -ETIMEDOUT;
- goto out;
- }
+ msecs_to_jiffies(PXP_TERMINATION_TIMEOUT_MS)))
+ return -ETIMEDOUT;
if (!wait_for_completion_timeout(&pxp->activation,
- msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS))) {
- ret = -ETIMEDOUT;
- goto out;
- }
+ msecs_to_jiffies(PXP_ACTIVATION_TIMEOUT_MS)))
+ return -ETIMEDOUT;
mutex_lock(&pxp->mutex);
@@ -574,11 +567,9 @@ wait_for_idle:
switch (pxp->status) {
case XE_PXP_ERROR:
ret = -EIO;
- break;
+ goto out_unlock;
case XE_PXP_ACTIVE:
- __exec_queue_add(pxp, q);
- mutex_unlock(&pxp->mutex);
- goto out;
+ goto out_unlock;
case XE_PXP_READY_TO_START:
pxp->status = XE_PXP_START_IN_PROGRESS;
reinit_completion(&pxp->activation);
@@ -586,8 +577,8 @@ wait_for_idle:
case XE_PXP_START_IN_PROGRESS:
/* If a start is in progress then the completion must not be done */
XE_WARN_ON(completion_done(&pxp->activation));
- mutex_unlock(&pxp->mutex);
- goto wait_for_idle;
+ restart = true;
+ goto out_unlock;
case XE_PXP_NEEDS_TERMINATION:
mark_termination_in_progress(pxp);
break;
@@ -595,29 +586,25 @@ wait_for_idle:
case XE_PXP_NEEDS_ADDITIONAL_TERMINATION:
/* If a termination is in progress then the completion must not be done */
XE_WARN_ON(completion_done(&pxp->termination));
- mutex_unlock(&pxp->mutex);
- goto wait_for_idle;
+ restart = true;
+ goto out_unlock;
case XE_PXP_SUSPENDED:
default:
drm_err(&pxp->xe->drm, "unexpected state during PXP start: %u\n", pxp->status);
ret = -EIO;
- break;
+ goto out_unlock;
}
mutex_unlock(&pxp->mutex);
- if (ret)
- goto out;
-
if (!completion_done(&pxp->termination)) {
ret = pxp_terminate_hw(pxp);
if (ret) {
drm_err(&pxp->xe->drm, "PXP termination failed before start\n");
mutex_lock(&pxp->mutex);
pxp->status = XE_PXP_ERROR;
- mutex_unlock(&pxp->mutex);
- goto out;
+ goto out_unlock;
}
goto wait_for_idle;
@@ -639,21 +626,59 @@ wait_for_idle:
if (pxp->status != XE_PXP_START_IN_PROGRESS) {
drm_err(&pxp->xe->drm, "unexpected state after PXP start: %u\n", pxp->status);
pxp->status = XE_PXP_NEEDS_TERMINATION;
- mutex_unlock(&pxp->mutex);
- goto wait_for_idle;
+ restart = true;
+ goto out_unlock;
}
/* If everything went ok, update the status and add the queue to the list */
- if (!ret) {
+ if (!ret)
pxp->status = XE_PXP_ACTIVE;
- __exec_queue_add(pxp, q);
- } else {
+ else
pxp->status = XE_PXP_ERROR;
- }
+out_unlock:
mutex_unlock(&pxp->mutex);
-out:
+ if (restart)
+ goto wait_for_idle;
+
+ return ret;
+}
+
+/**
+ * xe_pxp_exec_queue_add - add a queue to the PXP list
+ * @pxp: the xe->pxp pointer (it will be NULL if PXP is disabled)
+ * @q: the queue to add to the list
+ *
+ * If PXP is enabled and the prerequisites are done, start the PXP default
+ * session (if not already running) and add the queue to the PXP list.
+ *
+ * Returns 0 if the PXP session is running and the queue is in the list,
+ * -ENODEV if PXP is disabled, -EBUSY if the PXP prerequisites are not done,
+ * other errno value if something goes wrong during the session start.
+ */
+int xe_pxp_exec_queue_add(struct xe_pxp *pxp, struct xe_exec_queue *q)
+{
+ int ret;
+
+ if (!xe_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ /*
+ * Runtime suspend kills PXP, so we take a reference to prevent it from
+ * happening while we have active queues that use PXP
+ */
+ xe_pm_runtime_get(pxp->xe);
+
+start:
+ ret = pxp_start(pxp, q->pxp.type);
+
+ if (!ret) {
+ ret = __exec_queue_add(pxp, q);
+ if (ret == -EBUSY)
+ goto start;
+ }
+
/*
* in the successful case the PM ref is released from
* xe_pxp_exec_queue_remove
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 2dbf4066d86f..e8e1743dcb1e 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -683,8 +683,8 @@ static int query_oa_units(struct xe_device *xe,
du->oa_timestamp_freq = xe_oa_timestamp_frequency(gt);
du->capabilities = DRM_XE_OA_CAPS_BASE | DRM_XE_OA_CAPS_SYNCS |
DRM_XE_OA_CAPS_OA_BUFFER_SIZE |
- DRM_XE_OA_CAPS_WAIT_NUM_REPORTS;
-
+ DRM_XE_OA_CAPS_WAIT_NUM_REPORTS |
+ DRM_XE_OA_CAPS_OAM;
j = 0;
for_each_hw_engine(hwe, gt, hwe_id) {
if (!xe_hw_engine_is_reserved(hwe) &&
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 1905ca590965..d21bf8f26964 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -113,7 +113,8 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
kref_init(&job->refcount);
xe_exec_queue_get(job->q);
- err = drm_sched_job_init(&job->drm, q->entity, 1, NULL);
+ err = drm_sched_job_init(&job->drm, q->entity, 1, NULL,
+ q->xef ? q->xef->drm->client_id : 0);
if (err)
goto err_free;
@@ -216,15 +217,17 @@ void xe_sched_job_set_error(struct xe_sched_job *job, int error)
bool xe_sched_job_started(struct xe_sched_job *job)
{
+ struct dma_fence *fence = dma_fence_chain_contained(job->fence);
struct xe_lrc *lrc = job->q->lrc[0];
- return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
- xe_lrc_start_seqno(lrc),
- dma_fence_chain_contained(job->fence)->ops);
+ return !__dma_fence_is_later(fence,
+ xe_sched_job_lrc_seqno(job),
+ xe_lrc_start_seqno(lrc));
}
bool xe_sched_job_completed(struct xe_sched_job *job)
{
+ struct dma_fence *fence = dma_fence_chain_contained(job->fence);
struct xe_lrc *lrc = job->q->lrc[0];
/*
@@ -232,9 +235,9 @@ bool xe_sched_job_completed(struct xe_sched_job *job)
* parallel handshake is done.
*/
- return !__dma_fence_is_later(xe_sched_job_lrc_seqno(job),
- xe_lrc_seqno(lrc),
- dma_fence_chain_contained(job->fence)->ops);
+ return !__dma_fence_is_later(fence,
+ xe_sched_job_lrc_seqno(job),
+ xe_lrc_seqno(lrc));
}
void xe_sched_job_arm(struct xe_sched_job *job)
diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c
index 86d47aaf0358..125c836e0ee4 100644
--- a/drivers/gpu/drm/xe/xe_shrinker.c
+++ b/drivers/gpu/drm/xe/xe_shrinker.c
@@ -5,6 +5,7 @@
#include <linux/shrinker.h>
+#include <drm/drm_managed.h>
#include <drm/ttm/ttm_backup.h>
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_tt.h>
@@ -213,24 +214,34 @@ static void xe_shrinker_pm(struct work_struct *work)
xe_pm_runtime_put(shrinker->xe);
}
+static void xe_shrinker_fini(struct drm_device *drm, void *arg)
+{
+ struct xe_shrinker *shrinker = arg;
+
+ xe_assert(shrinker->xe, !shrinker->shrinkable_pages);
+ xe_assert(shrinker->xe, !shrinker->purgeable_pages);
+ shrinker_free(shrinker->shrink);
+ flush_work(&shrinker->pm_worker);
+ kfree(shrinker);
+}
+
/**
* xe_shrinker_create() - Create an xe per-device shrinker
* @xe: Pointer to the xe device.
*
- * Returns: A pointer to the created shrinker on success,
- * Negative error code on failure.
+ * Return: %0 on success. Negative error code on failure.
*/
-struct xe_shrinker *xe_shrinker_create(struct xe_device *xe)
+int xe_shrinker_create(struct xe_device *xe)
{
struct xe_shrinker *shrinker = kzalloc(sizeof(*shrinker), GFP_KERNEL);
if (!shrinker)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
shrinker->shrink = shrinker_alloc(0, "drm-xe_gem:%s", xe->drm.unique);
if (!shrinker->shrink) {
kfree(shrinker);
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
INIT_WORK(&shrinker->pm_worker, xe_shrinker_pm);
@@ -240,19 +251,7 @@ struct xe_shrinker *xe_shrinker_create(struct xe_device *xe)
shrinker->shrink->scan_objects = xe_shrinker_scan;
shrinker->shrink->private_data = shrinker;
shrinker_register(shrinker->shrink);
+ xe->mem.shrinker = shrinker;
- return shrinker;
-}
-
-/**
- * xe_shrinker_destroy() - Destroy an xe per-device shrinker
- * @shrinker: Pointer to the shrinker to destroy.
- */
-void xe_shrinker_destroy(struct xe_shrinker *shrinker)
-{
- xe_assert(shrinker->xe, !shrinker->shrinkable_pages);
- xe_assert(shrinker->xe, !shrinker->purgeable_pages);
- shrinker_free(shrinker->shrink);
- flush_work(&shrinker->pm_worker);
- kfree(shrinker);
+ return drmm_add_action_or_reset(&xe->drm, xe_shrinker_fini, shrinker);
}
diff --git a/drivers/gpu/drm/xe/xe_shrinker.h b/drivers/gpu/drm/xe/xe_shrinker.h
index 28a038f4fcbf..5132ae5192e1 100644
--- a/drivers/gpu/drm/xe/xe_shrinker.h
+++ b/drivers/gpu/drm/xe/xe_shrinker.h
@@ -11,8 +11,6 @@ struct xe_device;
void xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgeable);
-struct xe_shrinker *xe_shrinker_create(struct xe_device *xe);
-
-void xe_shrinker_destroy(struct xe_shrinker *shrinker);
+int xe_shrinker_create(struct xe_device *xe);
#endif
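With the shrinker lifetime now tied to a drmm release action, only the create call remains for callers. A hypothetical call site during device initialization (surrounding error-handling context assumed) reduces to:

	/* the drmm action registered by xe_shrinker_create() frees the
	 * shrinker on driver release, so no explicit destroy call is needed
	 */
	err = xe_shrinker_create(xe);
	if (err)
		return err;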
diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c
index c1275e64aa9c..6526fe450e55 100644
--- a/drivers/gpu/drm/xe/xe_sriov_vf.c
+++ b/drivers/gpu/drm/xe/xe_sriov_vf.c
@@ -7,12 +7,15 @@
#include "xe_assert.h"
#include "xe_device.h"
+#include "xe_gt.h"
#include "xe_gt_sriov_printk.h"
#include "xe_gt_sriov_vf.h"
+#include "xe_guc_ct.h"
#include "xe_pm.h"
#include "xe_sriov.h"
#include "xe_sriov_printk.h"
#include "xe_sriov_vf.h"
+#include "xe_tile_sriov_vf.h"
/**
* DOC: VF restore procedure in PF KMD and VF KMD
@@ -121,6 +124,15 @@
* | | |
*/
+static bool vf_migration_supported(struct xe_device *xe)
+{
+ /*
+ * TODO: Add conditions to allow specific platforms, when they're
+ * supported at production quality.
+ */
+ return IS_ENABLED(CONFIG_DRM_XE_DEBUG);
+}
+
static void migration_worker_func(struct work_struct *w);
/**
@@ -130,6 +142,9 @@ static void migration_worker_func(struct work_struct *w);
void xe_sriov_vf_init_early(struct xe_device *xe)
{
INIT_WORK(&xe->sriov.vf.migration.worker, migration_worker_func);
+
+ if (!vf_migration_supported(xe))
+ xe_sriov_info(xe, "migration not supported by this module version\n");
}
/**
@@ -157,6 +172,20 @@ static int vf_post_migration_requery_guc(struct xe_device *xe)
return ret;
}
+static void vf_post_migration_fixup_ctb(struct xe_device *xe)
+{
+ struct xe_gt *gt;
+ unsigned int id;
+
+ xe_assert(xe, IS_SRIOV_VF(xe));
+
+ for_each_gt(gt, xe, id) {
+ s32 shift = xe_gt_sriov_vf_ggtt_shift(gt);
+
+ xe_guc_ct_fixup_messages_with_ggtt(&gt->uc.guc.ct, shift);
+ }
+}
+
/*
* vf_post_migration_imminent - Check if post-restore recovery is coming.
* @xe: the &xe_device struct instance
@@ -170,6 +199,25 @@ static bool vf_post_migration_imminent(struct xe_device *xe)
work_pending(&xe->sriov.vf.migration.worker);
}
+static bool vf_post_migration_fixup_ggtt_nodes(struct xe_device *xe)
+{
+ bool need_fixups = false;
+ struct xe_tile *tile;
+ unsigned int id;
+
+ for_each_tile(tile, xe, id) {
+ struct xe_gt *gt = tile->primary_gt;
+ s64 shift;
+
+ shift = xe_gt_sriov_vf_ggtt_shift(gt);
+ if (shift) {
+ need_fixups = true;
+ xe_tile_sriov_vf_fixup_ggtt_nodes(tile, shift);
+ }
+ }
+ return need_fixups;
+}
+
/*
* Notify all GuCs about resource fixups apply finished.
*/
@@ -191,6 +239,7 @@ skip:
static void vf_post_migration_recovery(struct xe_device *xe)
{
+ bool need_fixups;
int err;
drm_dbg(&xe->drm, "migration recovery in progress\n");
@@ -200,8 +249,17 @@ static void vf_post_migration_recovery(struct xe_device *xe)
goto defer;
if (unlikely(err))
goto fail;
+ if (!vf_migration_supported(xe)) {
+ xe_sriov_err(xe, "migration not supported by this module version\n");
+ err = -ENOTRECOVERABLE;
+ goto fail;
+ }
+ need_fixups = vf_post_migration_fixup_ggtt_nodes(xe);
/* FIXME: add the recovery steps */
+ if (need_fixups)
+ vf_post_migration_fixup_ctb(xe);
+
vf_post_migration_notify_resfix_done(xe);
xe_pm_runtime_put(xe);
drm_notice(&xe->drm, "migration recovery ended\n");
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 6345896585de..26418e9bdff0 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -45,21 +45,6 @@ static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
return gpusvm_to_vm(r->gpusvm);
}
-static unsigned long xe_svm_range_start(struct xe_svm_range *range)
-{
- return drm_gpusvm_range_start(&range->base);
-}
-
-static unsigned long xe_svm_range_end(struct xe_svm_range *range)
-{
- return drm_gpusvm_range_end(&range->base);
-}
-
-static unsigned long xe_svm_range_size(struct xe_svm_range *range)
-{
- return drm_gpusvm_range_size(&range->base);
-}
-
#define range_debug(r__, operaton__) \
vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
"%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
@@ -103,11 +88,6 @@ static void xe_svm_range_free(struct drm_gpusvm_range *range)
kfree(range);
}
-static struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
-{
- return container_of(r, struct xe_svm_range, base);
-}
-
static void
xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
const struct mmu_notifier_range *mmu_range)
@@ -161,7 +141,12 @@ xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
for_each_tile(tile, xe, id)
if (xe_pt_zap_ptes_range(tile, vm, range)) {
tile_mask |= BIT(id);
- range->tile_invalidated |= BIT(id);
+ /*
+ * WRITE_ONCE pairs with READ_ONCE in
+ * xe_vm_has_valid_gpu_mapping()
+ */
+ WRITE_ONCE(range->tile_invalidated,
+ range->tile_invalidated | BIT(id));
}
return tile_mask;
@@ -187,14 +172,9 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
{
struct xe_vm *vm = gpusvm_to_vm(gpusvm);
struct xe_device *xe = vm->xe;
- struct xe_tile *tile;
struct drm_gpusvm_range *r, *first;
- struct xe_gt_tlb_invalidation_fence
- fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
u8 tile_mask = 0;
- u8 id;
- u32 fence_id = 0;
long err;
xe_svm_assert_in_notifier(vm);
@@ -240,42 +220,8 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
xe_device_wmb(xe);
- for_each_tile(tile, xe, id) {
- if (tile_mask & BIT(id)) {
- int err;
-
- xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
- &fence[fence_id], true);
-
- err = xe_gt_tlb_invalidation_range(tile->primary_gt,
- &fence[fence_id],
- adj_start,
- adj_end,
- vm->usm.asid);
- if (WARN_ON_ONCE(err < 0))
- goto wait;
- ++fence_id;
-
- if (!tile->media_gt)
- continue;
-
- xe_gt_tlb_invalidation_fence_init(tile->media_gt,
- &fence[fence_id], true);
-
- err = xe_gt_tlb_invalidation_range(tile->media_gt,
- &fence[fence_id],
- adj_start,
- adj_end,
- vm->usm.asid);
- if (WARN_ON_ONCE(err < 0))
- goto wait;
- ++fence_id;
- }
- }
-
-wait:
- for (id = 0; id < fence_id; ++id)
- xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+ err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
+ WARN_ON_ONCE(err);
range_notifier_event_end:
r = first;
@@ -662,13 +608,72 @@ static bool xe_svm_range_is_valid(struct xe_svm_range *range,
struct xe_tile *tile,
bool devmem_only)
{
- /*
- * Advisory only check whether the range currently has a valid mapping,
- * READ_ONCE pairs with WRITE_ONCE in xe_pt.c
- */
- return ((READ_ONCE(range->tile_present) &
- ~READ_ONCE(range->tile_invalidated)) & BIT(tile->id)) &&
- (!devmem_only || xe_svm_range_in_vram(range));
+ return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
+ range->tile_invalidated) &&
+ (!devmem_only || xe_svm_range_in_vram(range)));
+}
+
+/**
+ * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
+ * @vm: xe_vm pointer
+ * @range: Pointer to the SVM range structure
+ *
+ * xe_svm_range_migrate_to_smem() checks whether the range has pages in VRAM
+ * and, if so, migrates them to SMEM.
+ */
+void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
+{
+ if (xe_svm_range_in_vram(range))
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+}
+
+/**
+ * xe_svm_range_validate() - Check if the SVM range is valid
+ * @vm: xe_vm pointer
+ * @range: Pointer to the SVM range structure
+ * @tile_mask: Mask representing the tiles to be checked
+ * @devmem_preferred : if true range needs to be in devmem
+ *
+ * The xe_svm_range_validate() function checks if a range is
+ * valid and located in the desired memory region.
+ *
+ * Return: true if the range is valid, false otherwise
+ */
+bool xe_svm_range_validate(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_mask, bool devmem_preferred)
+{
+ bool ret;
+
+ xe_svm_notifier_lock(vm);
+
+ ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
+ (devmem_preferred == range->base.flags.has_devmem_pages);
+
+ xe_svm_notifier_unlock(vm);
+
+ return ret;
+}
+
+/**
+ * xe_svm_find_vma_start() - Find start of CPU VMA
+ * @vm: xe_vm pointer
+ * @start: start address
+ * @end: end address
+ * @vma: Pointer to struct xe_vma
+ *
+ * This function searches for a CPU VMA within the specified
+ * range [start, end] in the given VM. It adjusts the range based on the
+ * xe_vma start and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
+ *
+ * Return: The starting address of the VMA within the range,
+ * or ULONG_MAX if no VMA is found.
+ */
+u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
+{
+ return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
+ max(start, xe_vma_start(vma)),
+ min(end, xe_vma_end(vma)));
}
#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
@@ -677,9 +682,19 @@ static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
return &tile->mem.vram;
}
-static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
- struct xe_svm_range *range,
- const struct drm_gpusvm_ctx *ctx)
+/**
+ * xe_svm_alloc_vram() - Allocate device memory pages for range,
+ * migrating existing data.
+ * @vm: The VM.
+ * @tile: tile to allocate vram from
+ * @range: SVM range
+ * @ctx: DRM GPU SVM context
+ *
+ * Return: 0 on success, error code on failure.
+ */
+int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
{
struct mm_struct *mm = vm->svm.gpusvm.mm;
struct xe_vram_region *vr = tile_to_vr(tile);
@@ -733,13 +748,6 @@ unlock:
return err;
}
-#else
-static int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
- struct xe_svm_range *range,
- const struct drm_gpusvm_ctx *ctx)
-{
- return -EOPNOTSUPP;
-}
#endif
static bool supports_4K_migration(struct xe_device *xe)
@@ -750,21 +758,31 @@ static bool supports_4K_migration(struct xe_device *xe)
return true;
}
-static bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range,
- struct xe_vma *vma)
+/**
+ * xe_svm_range_needs_migrate_to_vram() - Check if SVM range needs migration to VRAM
+ * @range: SVM range for which migration needs to be decided
+ * @vma: vma which has range
+ * @preferred_region_is_vram: preferred region for range is vram
+ *
+ * Return: True if the range needs migration and migration is supported, false otherwise
+ */
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+ bool preferred_region_is_vram)
{
struct xe_vm *vm = range_to_vm(&range->base);
u64 range_size = xe_svm_range_size(range);
- if (!range->base.flags.migrate_devmem)
+ if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
return false;
- if (xe_svm_range_in_vram(range)) {
- drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
+ xe_assert(vm->xe, IS_DGFX(vm->xe));
+
+ if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
+ drm_info(&vm->xe->drm, "Range is already in VRAM\n");
return false;
}
- if (range_size <= SZ_64K && !supports_4K_migration(vm->xe)) {
+ if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
return false;
}
@@ -798,14 +816,13 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
.devmem_only = atomic && IS_DGFX(vm->xe) &&
IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR),
.timeslice_ms = atomic && IS_DGFX(vm->xe) &&
- IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? 5 : 0,
+ IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ?
+ vm->xe->atomic_svm_timeslice_ms : 0,
};
struct xe_svm_range *range;
- struct drm_gpusvm_range *r;
- struct drm_exec exec;
struct dma_fence *fence;
- int migrate_try_count = ctx.devmem_only ? 3 : 1;
struct xe_tile *tile = gt_to_tile(gt);
+ int migrate_try_count = ctx.devmem_only ? 3 : 1;
ktime_t end = 0;
int err;
@@ -820,23 +837,21 @@ retry:
if (err)
return err;
- r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, fault_addr,
- xe_vma_start(vma), xe_vma_end(vma),
- &ctx);
- if (IS_ERR(r))
- return PTR_ERR(r);
+ range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);
+
+ if (IS_ERR(range))
+ return PTR_ERR(range);
- if (ctx.devmem_only && !r->flags.migrate_devmem)
+ if (ctx.devmem_only && !range->base.flags.migrate_devmem)
return -EACCES;
- range = to_xe_range(r);
if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
return 0;
range_debug(range, "PAGE FAULT");
if (--migrate_try_count >= 0 &&
- xe_svm_range_needs_migrate_to_vram(range, vma)) {
+ xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
err = xe_svm_alloc_vram(vm, tile, range, &ctx);
ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (err) {
@@ -855,16 +870,11 @@ retry:
}
range_debug(range, "GET PAGES");
- err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
+ err = xe_svm_range_get_pages(vm, range, &ctx);
/* Corner where CPU mappings have changed */
if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
if (migrate_try_count > 0 || !ctx.devmem_only) {
- if (err == -EOPNOTSUPP) {
- range_debug(range, "PAGE FAULT - EVICT PAGES");
- drm_gpusvm_range_evict(&vm->svm.gpusvm,
- &range->base);
- }
drm_dbg(&vm->xe->drm,
"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
@@ -884,30 +894,21 @@ retry:
range_debug(range, "PAGE FAULT - BIND");
retry_bind:
- drm_exec_init(&exec, 0, 0);
- drm_exec_until_all_locked(&exec) {
- err = drm_exec_lock_obj(&exec, vm->gpuvm.r_obj);
- drm_exec_retry_on_contention(&exec);
- if (err) {
- drm_exec_fini(&exec);
- goto err_out;
- }
-
- fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
- if (IS_ERR(fence)) {
- drm_exec_fini(&exec);
- err = PTR_ERR(fence);
- if (err == -EAGAIN) {
- ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
- range_debug(range, "PAGE FAULT - RETRY BIND");
- goto retry;
- }
- if (xe_vm_validate_should_retry(&exec, err, &end))
- goto retry_bind;
- goto err_out;
+ xe_vm_lock(vm, false);
+ fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
+ if (IS_ERR(fence)) {
+ xe_vm_unlock(vm);
+ err = PTR_ERR(fence);
+ if (err == -EAGAIN) {
+ ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */
+ range_debug(range, "PAGE FAULT - RETRY BIND");
+ goto retry;
}
+ if (xe_vm_validate_should_retry(NULL, err, &end))
+ goto retry_bind;
+ goto err_out;
}
- drm_exec_fini(&exec);
+ xe_vm_unlock(vm);
dma_fence_wait(fence, false);
dma_fence_put(fence);
@@ -946,6 +947,56 @@ int xe_svm_bo_evict(struct xe_bo *bo)
return drm_gpusvm_evict_to_ram(&bo->devmem_allocation);
}
+/**
+ * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
+ * @vm: xe_vm pointer
+ * @addr: address for which range needs to be found/inserted
+ * @vma: Pointer to struct xe_vma which mirrors CPU
+ * @ctx: GPU SVM context
+ *
+ * This function finds an existing SVM range covering the address or inserts
+ * a newly allocated one.
+ *
+ * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
+ */
+struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
+ struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
+{
+ struct drm_gpusvm_range *r;
+
+ r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
+ xe_vma_start(vma), xe_vma_end(vma), ctx);
+ if (IS_ERR(r))
+ return ERR_CAST(r);
+
+ return to_xe_range(r);
+}
+
+/**
+ * xe_svm_range_get_pages() - Get pages for a SVM range
+ * @vm: Pointer to the struct xe_vm
+ * @range: Pointer to the xe SVM range structure
+ * @ctx: GPU SVM context
+ *
+ * This function gets pages for a SVM range and ensures they are mapped for
+ * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
+ struct drm_gpusvm_ctx *ctx)
+{
+ int err = 0;
+
+ err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
+ if (err == -EOPNOTSUPP) {
+ range_debug(range, "PAGE FAULT - EVICT PAGES");
+ drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
+ }
+
+ return err;
+}
+
#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
static struct drm_pagemap_device_addr
@@ -1024,6 +1075,13 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
return 0;
}
#else
+int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
return 0;
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 30fc78b85b30..19ce4f2754a7 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -70,6 +70,27 @@ int xe_svm_bo_evict(struct xe_bo *bo);
void xe_svm_range_debug(struct xe_svm_range *range, const char *operation);
+int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx);
+
+struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
+ struct xe_vma *vma, struct drm_gpusvm_ctx *ctx);
+
+int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
+ struct drm_gpusvm_ctx *ctx);
+
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+ bool preferred_region_is_vram);
+
+void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range);
+
+bool xe_svm_range_validate(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_mask, bool devmem_preferred);
+
+u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma);
+
/**
* xe_svm_range_has_dma_mapping() - SVM range has DMA mapping
* @range: SVM range
@@ -82,6 +103,53 @@ static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
return range->base.flags.has_dma_mapping;
}
+/**
+ * to_xe_range - Convert a drm_gpusvm_range pointer to a xe_svm_range
+ * @r: Pointer to the drm_gpusvm_range structure
+ *
+ * This function takes a pointer to a drm_gpusvm_range structure and
+ * converts it to a pointer to the containing xe_svm_range structure.
+ *
+ * Return: Pointer to the xe_svm_range structure
+ */
+static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
+{
+ return container_of(r, struct xe_svm_range, base);
+}
+
+/**
+ * xe_svm_range_start() - SVM range start address
+ * @range: SVM range
+ *
+ * Return: start address of range.
+ */
+static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_start(&range->base);
+}
+
+/**
+ * xe_svm_range_end() - SVM range end address
+ * @range: SVM range
+ *
+ * Return: end address of range.
+ */
+static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_end(&range->base);
+}
+
+/**
+ * xe_svm_range_size() - SVM range size
+ * @range: SVM range
+ *
+ * Return: Size of range.
+ */
+static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
+{
+ return drm_gpusvm_range_size(&range->base);
+}
+
#define xe_svm_assert_in_notifier(vm__) \
lockdep_assert_held_write(&(vm__)->svm.gpusvm.notifier_lock)
@@ -97,6 +165,8 @@ void xe_svm_flush(struct xe_vm *vm);
#include <linux/interval_tree.h>
struct drm_pagemap_device_addr;
+struct drm_gpusvm_ctx;
+struct drm_gpusvm_range;
struct xe_bo;
struct xe_gt;
struct xe_vm;
@@ -167,6 +237,74 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
}
+static inline
+int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
+ struct xe_svm_range *range,
+ const struct drm_gpusvm_ctx *ctx)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline
+struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
+ struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline
+int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
+ struct drm_gpusvm_ctx *ctx)
+{
+ return -EINVAL;
+}
+
+static inline struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
+{
+ return NULL;
+}
+
+static inline unsigned long xe_svm_range_start(struct xe_svm_range *range)
+{
+ return 0;
+}
+
+static inline unsigned long xe_svm_range_end(struct xe_svm_range *range)
+{
+ return 0;
+}
+
+static inline unsigned long xe_svm_range_size(struct xe_svm_range *range)
+{
+ return 0;
+}
+
+static inline
+bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
+ bool preferred_region_is_vram)
+{
+ return false;
+}
+
+static inline
+void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
+{
+}
+
+static inline
+bool xe_svm_range_validate(struct xe_vm *vm,
+ struct xe_svm_range *range,
+ u8 tile_mask, bool devmem_preferred)
+{
+ return false;
+}
+
+static inline
+u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 addr, u64 end, struct xe_vma *vma)
+{
+ return ULONG_MAX;
+}
+
#define xe_svm_assert_in_notifier(...) do {} while (0)
#define xe_svm_range_has_dma_mapping(...) false
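The helpers exported above are meant to be composed the way the page-fault path in xe_svm.c already does. A condensed, hypothetical sketch of that sequence, assuming vm, vma, tile, addr and ctx are set up by the caller and with retry/error handling omitted:

	struct xe_svm_range *range;
	int err;

	/* find (or create) the SVM range covering the faulting address */
	range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
	if (IS_ERR(range))
		return PTR_ERR(range);

	/* optionally migrate to VRAM, then populate and DMA-map the pages */
	if (xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe)))
		err = xe_svm_alloc_vram(vm, tile, range, &ctx);

	err = xe_svm_range_get_pages(vm, range, &ctx);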
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 0771acbbf367..672faa0b67f1 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -87,13 +87,9 @@
*/
static int xe_tile_alloc(struct xe_tile *tile)
{
- struct drm_device *drm = &tile_to_xe(tile)->drm;
-
- tile->mem.ggtt = drmm_kzalloc(drm, sizeof(*tile->mem.ggtt),
- GFP_KERNEL);
+ tile->mem.ggtt = xe_ggtt_alloc(tile);
if (!tile->mem.ggtt)
return -ENOMEM;
- tile->mem.ggtt->tile = tile;
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.c b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
new file mode 100644
index 000000000000..f221dbed16f0
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.c
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#include <drm/drm_managed.h>
+
+#include "regs/xe_gtt_defs.h"
+
+#include "xe_assert.h"
+#include "xe_ggtt.h"
+#include "xe_gt_sriov_vf.h"
+#include "xe_sriov.h"
+#include "xe_sriov_printk.h"
+#include "xe_tile_sriov_vf.h"
+#include "xe_wopcm.h"
+
+static int vf_init_ggtt_balloons(struct xe_tile *tile)
+{
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ tile->sriov.vf.ggtt_balloon[0] = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(tile->sriov.vf.ggtt_balloon[0]))
+ return PTR_ERR(tile->sriov.vf.ggtt_balloon[0]);
+
+ tile->sriov.vf.ggtt_balloon[1] = xe_ggtt_node_init(ggtt);
+ if (IS_ERR(tile->sriov.vf.ggtt_balloon[1])) {
+ xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[0]);
+ return PTR_ERR(tile->sriov.vf.ggtt_balloon[1]);
+ }
+
+ return 0;
+}
+
+/**
+ * xe_tile_sriov_vf_balloon_ggtt_locked - Insert balloon nodes to limit used GGTT address range.
+ * @tile: the &xe_tile struct instance
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile)
+{
+ u64 ggtt_base = xe_gt_sriov_vf_ggtt_base(tile->primary_gt);
+ u64 ggtt_size = xe_gt_sriov_vf_ggtt(tile->primary_gt);
+ struct xe_device *xe = tile_to_xe(tile);
+ u64 wopcm = xe_wopcm_size(xe);
+ u64 start, end;
+ int err;
+
+ xe_tile_assert(tile, IS_SRIOV_VF(xe));
+ xe_tile_assert(tile, ggtt_size);
+ lockdep_assert_held(&tile->mem.ggtt->lock);
+
+ /*
+ * VF can only use part of the GGTT as allocated by the PF:
+ *
+ * WOPCM GUC_GGTT_TOP
+ * |<------------ Total GGTT size ------------------>|
+ *
+ * VF GGTT base -->|<- size ->|
+ *
+ * +--------------------+----------+-----------------+
+ * |////////////////////| block |\\\\\\\\\\\\\\\\\|
+ * +--------------------+----------+-----------------+
+ *
+ * |<--- balloon[0] --->|<-- VF -->|<-- balloon[1] ->|
+ */
+
+ if (ggtt_base < wopcm || ggtt_base > GUC_GGTT_TOP ||
+ ggtt_size > GUC_GGTT_TOP - ggtt_base) {
+ xe_sriov_err(xe, "tile%u: Invalid GGTT configuration: %#llx-%#llx\n",
+ tile->id, ggtt_base, ggtt_base + ggtt_size - 1);
+ return -ERANGE;
+ }
+
+ start = wopcm;
+ end = ggtt_base;
+ if (end != start) {
+ err = xe_ggtt_node_insert_balloon_locked(tile->sriov.vf.ggtt_balloon[0],
+ start, end);
+ if (err)
+ return err;
+ }
+
+ start = ggtt_base + ggtt_size;
+ end = GUC_GGTT_TOP;
+ if (end != start) {
+ err = xe_ggtt_node_insert_balloon_locked(tile->sriov.vf.ggtt_balloon[1],
+ start, end);
+ if (err) {
+ xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[0]);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int vf_balloon_ggtt(struct xe_tile *tile)
+{
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+ int err;
+
+ mutex_lock(&ggtt->lock);
+ err = xe_tile_sriov_vf_balloon_ggtt_locked(tile);
+ mutex_unlock(&ggtt->lock);
+
+ return err;
+}
+
+/**
+ * xe_tile_sriov_vf_deballoon_ggtt_locked - Remove balloon nodes.
+ * @tile: the &xe_tile struct instance
+ */
+void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile)
+{
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[1]);
+ xe_ggtt_node_remove_balloon_locked(tile->sriov.vf.ggtt_balloon[0]);
+}
+
+static void vf_deballoon_ggtt(struct xe_tile *tile)
+{
+ mutex_lock(&tile->mem.ggtt->lock);
+ xe_tile_sriov_vf_deballoon_ggtt_locked(tile);
+ mutex_unlock(&tile->mem.ggtt->lock);
+}
+
+static void vf_fini_ggtt_balloons(struct xe_tile *tile)
+{
+ xe_tile_assert(tile, IS_SRIOV_VF(tile_to_xe(tile)));
+
+ xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[1]);
+ xe_ggtt_node_fini(tile->sriov.vf.ggtt_balloon[0]);
+}
+
+static void cleanup_ggtt(struct drm_device *drm, void *arg)
+{
+ struct xe_tile *tile = arg;
+
+ vf_deballoon_ggtt(tile);
+ vf_fini_ggtt_balloons(tile);
+}
+
+/**
+ * xe_tile_sriov_vf_prepare_ggtt - Prepare a VF's GGTT configuration.
+ * @tile: the &xe_tile
+ *
+ * This function is for VF use only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile)
+{
+ struct xe_device *xe = tile_to_xe(tile);
+ int err;
+
+ err = vf_init_ggtt_balloons(tile);
+ if (err)
+ return err;
+
+ err = vf_balloon_ggtt(tile);
+ if (err) {
+ vf_fini_ggtt_balloons(tile);
+ return err;
+ }
+
+ return drmm_add_action_or_reset(&xe->drm, cleanup_ggtt, tile);
+}
+
+/**
+ * DOC: GGTT nodes shifting during VF post-migration recovery
+ *
+ * The first fixup applied to the VF KMD structures as part of post-migration
+ * recovery is shifting nodes within the &xe_ggtt instance. The nodes are
+ * moved from the range previously assigned to this VF into the newly
+ * provisioned area.
+ * The changes include balloons, which are resized accordingly.
+ *
+ * The balloon nodes are there to eliminate unavailable ranges from use: one
+ * reserves the GGTT area below the range assigned to the current VF, and the
+ * other reserves the area above.
+ *
+ * Below is the GGTT layout of an example VF, with a certain address range
+ * assigned to said VF, and inaccessible areas above and below:
+ *
+ * 0 4GiB
+ * |<--------------------------- Total GGTT size ----------------------------->|
+ * WOPCM GUC_TOP
+ * |<-------------- Area mappable by xe_ggtt instance ---------------->|
+ *
+ * +---+---------------------------------+----------+----------------------+---+
+ * |\\\|/////////////////////////////////| VF mem |//////////////////////|\\\|
+ * +---+---------------------------------+----------+----------------------+---+
+ *
+ * Hardware enforced access rules before migration:
+ *
+ * |<------- inaccessible for VF ------->|<VF owned>|<-- inaccessible for VF ->|
+ *
+ * GGTT nodes used for tracking allocations:
+ *
+ * |<---------- balloon ------------>|<- nodes->|<----- balloon ------>|
+ *
+ * After the migration, the GGTT area assigned to the VF might have shifted,
+ * either to a lower or to a higher address. But we expect the total size and
+ * the extra areas to be identical, as migration can only happen between
+ * matching platforms. Below is an example GGTT layout of the VF after
+ * migration. The content of the VF's GGTT has been moved to a new area, and
+ * we receive its address from the GuC:
+ *
+ * +---+----------------------+----------+---------------------------------+---+
+ * |\\\|//////////////////////| VF mem |/////////////////////////////////|\\\|
+ * +---+----------------------+----------+---------------------------------+---+
+ *
+ * Hardware enforced access rules after migration:
+ *
+ * |<- inaccessible for VF -->|<VF owned>|<------- inaccessible for VF ------->|
+ *
+ * So the VF has a new slice of GGTT assigned, and during the migration
+ * process the memory content was copied to that new area. But the &xe_ggtt
+ * nodes are still tracking allocations using the old addresses. The nodes
+ * within the VF-owned area have to be shifted, and the balloon nodes need to
+ * be resized to properly mask out areas not owned by the VF.
+ *
+ * Fixed &xe_ggtt nodes used for tracking allocations:
+ *
+ * |<------ balloon ------>|<- nodes->|<----------- balloon ----------->|
+ *
+ * Due to the use of GPU profiles, we do not expect the old and new GGTT areas
+ * to overlap; but our node shifting will fix addresses properly regardless.
+ */
+
+/**
+ * xe_tile_sriov_vf_fixup_ggtt_nodes - Shift GGTT allocations to match assigned range.
+ * @tile: the &xe_tile struct instance
+ * @shift: the shift value
+ *
+ * Since Global GTT is not virtualized, each VF has an assigned range
+ * within the global space. This range might have changed during migration,
+ * which requires all memory addresses pointing to GGTT to be shifted.
+ */
+void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift)
+{
+ struct xe_ggtt *ggtt = tile->mem.ggtt;
+
+ mutex_lock(&ggtt->lock);
+
+ xe_tile_sriov_vf_deballoon_ggtt_locked(tile);
+ xe_ggtt_shift_nodes_locked(ggtt, shift);
+ xe_tile_sriov_vf_balloon_ggtt_locked(tile);
+
+ mutex_unlock(&ggtt->lock);
+}
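
For reference, the balloon arithmetic that xe_tile_sriov_vf_balloon_ggtt_locked() implements above reduces to the following standalone sketch. This is illustrative only and not part of the patch; the helper name and struct are hypothetical, while the validity check mirrors the one in the function:

#include <linux/errno.h>
#include <linux/types.h>

struct vf_ggtt_layout {
	u64 low_start, low_end;   /* balloon[0]: [WOPCM, VF GGTT base) */
	u64 high_start, high_end; /* balloon[1]: [VF base + size, GUC_GGTT_TOP) */
};

static int vf_compute_ggtt_balloons(u64 wopcm, u64 ggtt_base, u64 ggtt_size,
				    u64 guc_ggtt_top,
				    struct vf_ggtt_layout *out)
{
	/* Same validity check as above: the VF range must sit entirely
	 * inside [WOPCM, GUC_GGTT_TOP). */
	if (ggtt_base < wopcm || ggtt_base > guc_ggtt_top ||
	    ggtt_size > guc_ggtt_top - ggtt_base)
		return -ERANGE;

	out->low_start = wopcm;
	out->low_end = ggtt_base;		/* empty if base == WOPCM */
	out->high_start = ggtt_base + ggtt_size;
	out->high_end = guc_ggtt_top;		/* empty if range ends at top */

	return 0;
}

An empty balloon (start == end) is simply skipped, mirroring the "end != start" checks in the function above.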
diff --git a/drivers/gpu/drm/xe/xe_tile_sriov_vf.h b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
new file mode 100644
index 000000000000..93eb043171e8
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_tile_sriov_vf.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _XE_TILE_SRIOV_VF_H_
+#define _XE_TILE_SRIOV_VF_H_
+
+#include <linux/types.h>
+
+struct xe_tile;
+
+int xe_tile_sriov_vf_prepare_ggtt(struct xe_tile *tile);
+int xe_tile_sriov_vf_balloon_ggtt_locked(struct xe_tile *tile);
+void xe_tile_sriov_vf_deballoon_ggtt_locked(struct xe_tile *tile);
+void xe_tile_sriov_vf_fixup_ggtt_nodes(struct xe_tile *tile, s64 shift);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c
index 49ddbda7cdef..828b45b24c23 100644
--- a/drivers/gpu/drm/xe/xe_tuning.c
+++ b/drivers/gpu/drm/xe/xe_tuning.c
@@ -98,6 +98,11 @@ static const struct xe_rtp_entry_sr engine_tunings[] = {
ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE))
},
+ { XE_RTP_NAME("Tuning: Disable NULL query for Anyhit Shader"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(3000, XE_RTP_END_VERSION_UNDEFINED),
+ FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_ACTIONS(SET(RT_CTRL, DIS_NULL_QUERY))
+ },
};
static const struct xe_rtp_entry_sr lrc_tunings[] = {
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index 2741849bbf4d..6d0869518652 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -16,6 +16,7 @@
#include "xe_gsc.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
+#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_map.h"
#include "xe_mmio.h"
@@ -662,11 +663,39 @@ do { \
ver_->major, ver_->minor, ver_->patch); \
} while (0)
+static void uc_fw_vf_override(struct xe_uc_fw *uc_fw)
+{
+ struct xe_uc_fw_version *compat = &uc_fw->versions.found[XE_UC_FW_VER_COMPATIBILITY];
+ struct xe_uc_fw_version *wanted = &uc_fw->versions.wanted;
+
+ /* Only GuC/HuC are supported */
+ if (uc_fw->type != XE_UC_FW_TYPE_GUC && uc_fw->type != XE_UC_FW_TYPE_HUC)
+ uc_fw->path = NULL;
+
+ /* VF will support only firmware that the driver can autoselect */
+ xe_uc_fw_change_status(uc_fw, uc_fw->path ?
+ XE_UC_FIRMWARE_PRELOADED :
+ XE_UC_FIRMWARE_NOT_SUPPORTED);
+
+ if (!xe_uc_fw_is_supported(uc_fw))
+ return;
+
+ /* PF is doing the loading, so we don't need a path on the VF */
+ uc_fw->path = "Loaded by PF";
+
+ /* The GuC versions are set up during the VF bootstrap */
+ if (uc_fw->type == XE_UC_FW_TYPE_GUC) {
+ uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY;
+ xe_gt_sriov_vf_guc_versions(uc_fw_to_gt(uc_fw), wanted, compat);
+ }
+}
+
static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmware_p)
{
struct xe_device *xe = uc_fw_to_xe(uc_fw);
+ struct xe_gt *gt = uc_fw_to_gt(uc_fw);
+ struct drm_printer p = xe_gt_info_printer(gt);
struct device *dev = xe->drm.dev;
- struct drm_printer p = drm_info_printer(dev);
const struct firmware *fw = NULL;
int err;
@@ -675,20 +704,13 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
 * before we've looked at the HW caps to see if we have uc support
*/
BUILD_BUG_ON(XE_UC_FIRMWARE_UNINITIALIZED);
- xe_assert(xe, !uc_fw->status);
- xe_assert(xe, !uc_fw->path);
+ xe_gt_assert(gt, !uc_fw->status);
+ xe_gt_assert(gt, !uc_fw->path);
uc_fw_auto_select(xe, uc_fw);
if (IS_SRIOV_VF(xe)) {
- /* Only GuC/HuC are supported */
- if (uc_fw->type != XE_UC_FW_TYPE_GUC &&
- uc_fw->type != XE_UC_FW_TYPE_HUC)
- uc_fw->path = NULL;
- /* VF will support only firmwares that driver can autoselect */
- xe_uc_fw_change_status(uc_fw, uc_fw->path ?
- XE_UC_FIRMWARE_PRELOADED :
- XE_UC_FIRMWARE_NOT_SUPPORTED);
+ uc_fw_vf_override(uc_fw);
return 0;
}
@@ -700,7 +722,7 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
if (!xe_uc_fw_is_supported(uc_fw)) {
if (uc_fw->type == XE_UC_FW_TYPE_GUC) {
- drm_err(&xe->drm, "No GuC firmware defined for platform\n");
+ xe_gt_err(gt, "No GuC firmware defined for platform\n");
return -ENOENT;
}
return 0;
@@ -709,7 +731,7 @@ static int uc_fw_request(struct xe_uc_fw *uc_fw, const struct firmware **firmwar
/* an empty path means the firmware is disabled */
if (!xe_device_uc_enabled(xe) || !(*uc_fw->path)) {
xe_uc_fw_change_status(uc_fw, XE_UC_FIRMWARE_DISABLED);
- drm_dbg(&xe->drm, "%s disabled", xe_uc_fw_type_repr(uc_fw->type));
+ xe_gt_dbg(gt, "%s disabled\n", xe_uc_fw_type_repr(uc_fw->type));
return 0;
}
@@ -742,10 +764,10 @@ fail:
XE_UC_FIRMWARE_MISSING :
XE_UC_FIRMWARE_ERROR);
- drm_notice(&xe->drm, "%s firmware %s: fetch failed with error %d\n",
- xe_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
- drm_info(&xe->drm, "%s firmware(s) can be downloaded from %s\n",
- xe_uc_fw_type_repr(uc_fw->type), XE_UC_FIRMWARE_URL);
+ xe_gt_notice(gt, "%s firmware %s: fetch failed with error %pe\n",
+ xe_uc_fw_type_repr(uc_fw->type), uc_fw->path, ERR_PTR(err));
+ xe_gt_info(gt, "%s firmware(s) can be downloaded from %s\n",
+ xe_uc_fw_type_repr(uc_fw->type), XE_UC_FIRMWARE_URL);
release_firmware(fw); /* OK even if fw is NULL */
diff --git a/drivers/gpu/drm/xe/xe_uc_fw_types.h b/drivers/gpu/drm/xe/xe_uc_fw_types.h
index ad3b35a0e6eb..914026015019 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw_types.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw_types.h
@@ -65,6 +65,8 @@ enum xe_uc_fw_type {
* struct xe_uc_fw_version - Version for XE micro controller firmware
*/
struct xe_uc_fw_version {
+ /** @branch: branch version of the FW (not always available) */
+ u16 branch;
/** @major: major version of the FW */
u16 major;
/** @minor: minor version of the FW */
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 861577746929..04d1a43b81e3 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -732,7 +732,9 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
DMA_RESV_USAGE_BOOKKEEP,
false, MAX_SCHEDULE_TIMEOUT);
+ down_read(&vm->userptr.notifier_lock);
err = xe_vm_invalidate_vma(&uvma->vma);
+ up_read(&vm->userptr.notifier_lock);
xe_vm_unlock(vm);
if (err)
break;
@@ -798,21 +800,47 @@ static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
}
ALLOW_ERROR_INJECTION(xe_vma_ops_alloc, ERRNO);
+static void xe_vma_svm_prefetch_op_fini(struct xe_vma_op *op)
+{
+ struct xe_vma *vma;
+
+ vma = gpuva_to_vma(op->base.prefetch.va);
+
+ if (op->base.op == DRM_GPUVA_OP_PREFETCH && xe_vma_is_cpu_addr_mirror(vma))
+ xa_destroy(&op->prefetch_range.range);
+}
+
+static void xe_vma_svm_prefetch_ops_fini(struct xe_vma_ops *vops)
+{
+ struct xe_vma_op *op;
+
+ if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH))
+ return;
+
+ list_for_each_entry(op, &vops->list, link)
+ xe_vma_svm_prefetch_op_fini(op);
+}
+
static void xe_vma_ops_fini(struct xe_vma_ops *vops)
{
int i;
+ xe_vma_svm_prefetch_ops_fini(vops);
+
for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
kfree(vops->pt_update_ops[i].ops);
}
-static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask)
+static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask, int inc_val)
{
int i;
+ if (!inc_val)
+ return;
+
for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
if (BIT(i) & tile_mask)
- ++vops->pt_update_ops[i].num_ops;
+ vops->pt_update_ops[i].num_ops += inc_val;
}
static void xe_vm_populate_rebind(struct xe_vma_op *op, struct xe_vma *vma,
@@ -842,7 +870,7 @@ static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
xe_vm_populate_rebind(op, vma, tile_mask);
list_add_tail(&op->link, &vops->list);
- xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
+ xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1);
return 0;
}
@@ -977,7 +1005,7 @@ xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops,
xe_vm_populate_range_rebind(op, vma, range, tile_mask);
list_add_tail(&op->link, &vops->list);
- xe_vma_ops_incr_pt_update_ops(vops, tile_mask);
+ xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1);
return 0;
}
@@ -1062,7 +1090,7 @@ xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops,
xe_vm_populate_range_unbind(op, range);
list_add_tail(&op->link, &vops->list);
- xe_vma_ops_incr_pt_update_ops(vops, range->tile_present);
+ xe_vma_ops_incr_pt_update_ops(vops, range->tile_present, 1);
return 0;
}
@@ -2141,6 +2169,35 @@ int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
return err;
}
+static bool vma_matches(struct xe_vma *vma, u64 page_addr)
+{
+ if (page_addr > xe_vma_end(vma) - 1 ||
+ page_addr + SZ_4K - 1 < xe_vma_start(vma))
+ return false;
+
+ return true;
+}
+
+/**
+ * xe_vm_find_vma_by_addr() - Find a VMA by its address
+ *
+ * @vm: the xe_vm the vma belongs to
+ * @page_addr: address to look up
+ */
+struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr)
+{
+ struct xe_vma *vma = NULL;
+
+ if (vm->usm.last_fault_vma) { /* Fast lookup */
+ if (vma_matches(vm->usm.last_fault_vma, page_addr))
+ vma = vm->usm.last_fault_vma;
+ }
+ if (!vma)
+ vma = xe_vm_find_overlapping_vma(vm, page_addr, SZ_4K);
+
+ return vma;
+}
+
static const u32 region_to_mem_type[] = {
XE_PL_TT,
XE_PL_VRAM0,
@@ -2221,13 +2278,25 @@ static bool __xe_vm_needs_clear_scratch_pages(struct xe_vm *vm, u32 bind_flags)
return true;
}
+static void xe_svm_prefetch_gpuva_ops_fini(struct drm_gpuva_ops *ops)
+{
+ struct drm_gpuva_op *__op;
+
+ drm_gpuva_for_each_op(__op, ops) {
+ struct xe_vma_op *op = gpuva_op_to_vma_op(__op);
+
+ xe_vma_svm_prefetch_op_fini(op);
+ }
+}
+
/*
* Create operations list from IOCTL arguments, setup operations fields so parse
* and commit steps are decoupled from IOCTL arguments. This step can fail.
*/
static struct drm_gpuva_ops *
-vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
- u64 bo_offset_or_userptr, u64 addr, u64 range,
+vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops,
+ struct xe_bo *bo, u64 bo_offset_or_userptr,
+ u64 addr, u64 range,
u32 operation, u32 flags,
u32 prefetch_region, u16 pat_index)
{
@@ -2235,6 +2304,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
struct drm_gpuva_ops *ops;
struct drm_gpuva_op *__op;
struct drm_gpuvm_bo *vm_bo;
+ u64 range_end = addr + range;
int err;
lockdep_assert_held_write(&vm->lock);
@@ -2296,14 +2366,80 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
op->map.invalidate_on_bind =
__xe_vm_needs_clear_scratch_pages(vm, flags);
} else if (__op->op == DRM_GPUVA_OP_PREFETCH) {
- op->prefetch.region = prefetch_region;
- }
+ struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+ struct xe_svm_range *svm_range;
+ struct drm_gpusvm_ctx ctx = {};
+ struct xe_tile *tile;
+ u8 id, tile_mask = 0;
+ u32 i;
+
+ if (!xe_vma_is_cpu_addr_mirror(vma)) {
+ op->prefetch.region = prefetch_region;
+ break;
+ }
+
+ ctx.read_only = xe_vma_read_only(vma);
+ ctx.devmem_possible = IS_DGFX(vm->xe) &&
+ IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR);
+
+ for_each_tile(tile, vm->xe, id)
+ tile_mask |= 0x1 << id;
+
+ xa_init_flags(&op->prefetch_range.range, XA_FLAGS_ALLOC);
+ op->prefetch_range.region = prefetch_region;
+ op->prefetch_range.ranges_count = 0;
+alloc_next_range:
+ svm_range = xe_svm_range_find_or_insert(vm, addr, vma, &ctx);
+
+ if (PTR_ERR(svm_range) == -ENOENT) {
+ u64 ret = xe_svm_find_vma_start(vm, addr, range_end, vma);
+
+ addr = ret == ULONG_MAX ? 0 : ret;
+ if (addr)
+ goto alloc_next_range;
+ else
+ goto print_op_label;
+ }
+
+ if (IS_ERR(svm_range)) {
+ err = PTR_ERR(svm_range);
+ goto unwind_prefetch_ops;
+ }
+
+ if (xe_svm_range_validate(vm, svm_range, tile_mask, !!prefetch_region)) {
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE IS VALID");
+ goto check_next_range;
+ }
+
+ err = xa_alloc(&op->prefetch_range.range,
+ &i, svm_range, xa_limit_32b,
+ GFP_KERNEL);
+ if (err)
+ goto unwind_prefetch_ops;
+
+ op->prefetch_range.ranges_count++;
+ vops->flags |= XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH;
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE CREATED");
+check_next_range:
+ if (range_end > xe_svm_range_end(svm_range) &&
+ xe_svm_range_end(svm_range) < xe_vma_end(vma)) {
+ addr = xe_svm_range_end(svm_range);
+ goto alloc_next_range;
+ }
+ }
+print_op_label:
print_op(vm->xe, __op);
}
return ops;
+
+unwind_prefetch_ops:
+ xe_svm_prefetch_gpuva_ops_fini(ops);
+ drm_gpuva_ops_free(&vm->gpuvm, ops);
+ return ERR_PTR(err);
}
+
ALLOW_ERROR_INJECTION(vm_bind_ioctl_ops_create, ERRNO);
static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op,
@@ -2498,7 +2634,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
!op->map.is_cpu_addr_mirror) ||
op->map.invalidate_on_bind)
xe_vma_ops_incr_pt_update_ops(vops,
- op->tile_mask);
+ op->tile_mask, 1);
break;
}
case DRM_GPUVA_OP_REMAP:
@@ -2507,6 +2643,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
gpuva_to_vma(op->base.remap.unmap->va);
bool skip = xe_vma_is_cpu_addr_mirror(old);
u64 start = xe_vma_start(old), end = xe_vma_end(old);
+ int num_remap_ops = 0;
if (op->base.remap.prev)
start = op->base.remap.prev->va.addr +
@@ -2559,7 +2696,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
(ULL)op->remap.start,
(ULL)op->remap.range);
} else {
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ num_remap_ops++;
}
}
@@ -2588,11 +2725,13 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
(ULL)op->remap.start,
(ULL)op->remap.range);
} else {
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ num_remap_ops++;
}
}
if (!skip)
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ num_remap_ops++;
+
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, num_remap_ops);
break;
}
case DRM_GPUVA_OP_UNMAP:
@@ -2604,7 +2743,7 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return -EBUSY;
if (!xe_vma_is_cpu_addr_mirror(vma))
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
break;
case DRM_GPUVA_OP_PREFETCH:
vma = gpuva_to_vma(op->base.prefetch.va);
@@ -2615,8 +2754,12 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
return err;
}
- if (!xe_vma_is_cpu_addr_mirror(vma))
- xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask,
+ op->prefetch_range.ranges_count);
+ else
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
+
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
@@ -2742,6 +2885,57 @@ static int check_ufence(struct xe_vma *vma)
return 0;
}
+static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op)
+{
+ bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR);
+ struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
+ int err = 0;
+
+ struct xe_svm_range *svm_range;
+ struct drm_gpusvm_ctx ctx = {};
+ struct xe_tile *tile;
+ unsigned long i;
+ u32 region;
+
+ if (!xe_vma_is_cpu_addr_mirror(vma))
+ return 0;
+
+ region = op->prefetch_range.region;
+
+ ctx.read_only = xe_vma_read_only(vma);
+ ctx.devmem_possible = devmem_possible;
+ ctx.check_pages_threshold = devmem_possible ? SZ_64K : 0;
+
+ /* TODO: Threading the migration */
+ xa_for_each(&op->prefetch_range.range, i, svm_range) {
+ if (!region)
+ xe_svm_range_migrate_to_smem(vm, svm_range);
+
+ if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) {
+ tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0];
+ err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx);
+ if (err) {
+ drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ return -ENODATA;
+ }
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE MIGRATED TO VRAM");
+ }
+
+ err = xe_svm_range_get_pages(vm, svm_range, &ctx);
+ if (err) {
+ drm_dbg(&vm->xe->drm, "Get pages failed, asid=%u, gpusvm=%p, errno=%pe\n",
+ vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
+ if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM)
+ err = -ENODATA;
+ return err;
+ }
+ xe_svm_range_debug(svm_range, "PREFETCH - RANGE GET PAGES DONE");
+ }
+
+ return err;
+}
+
static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
struct xe_vma_op *op)
{
@@ -2779,7 +2973,12 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
case DRM_GPUVA_OP_PREFETCH:
{
struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va);
- u32 region = op->prefetch.region;
+ u32 region;
+
+ if (xe_vma_is_cpu_addr_mirror(vma))
+ region = op->prefetch_range.region;
+ else
+ region = op->prefetch.region;
xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type));
@@ -2798,6 +2997,25 @@ static int op_lock_and_prep(struct drm_exec *exec, struct xe_vm *vm,
return err;
}
+static int vm_bind_ioctl_ops_prefetch_ranges(struct xe_vm *vm, struct xe_vma_ops *vops)
+{
+ struct xe_vma_op *op;
+ int err;
+
+ if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH))
+ return 0;
+
+ list_for_each_entry(op, &vops->list, link) {
+ if (op->base.op == DRM_GPUVA_OP_PREFETCH) {
+ err = prefetch_ranges(vm, op);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
struct xe_vm *vm,
struct xe_vma_ops *vops)
@@ -3239,6 +3457,7 @@ static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
vops->q = q;
vops->syncs = syncs;
vops->num_syncs = num_syncs;
+ vops->flags = 0;
}
static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo,
@@ -3446,7 +3665,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
u16 pat_index = bind_ops[i].pat_index;
- ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
+ ops[i] = vm_bind_ioctl_ops_create(vm, &vops, bos[i], obj_offset,
addr, range, op, flags,
prefetch_region, pat_index);
if (IS_ERR(ops[i])) {
@@ -3479,6 +3698,10 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
if (err)
goto unwind_ops;
+ err = vm_bind_ioctl_ops_prefetch_ranges(vm, &vops);
+ if (err)
+ goto unwind_ops;
+
fence = vm_bind_ioctl_ops_execute(vm, &vops);
if (IS_ERR(fence))
err = PTR_ERR(fence);
@@ -3548,7 +3771,7 @@ struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo,
xe_vma_ops_init(&vops, vm, q, NULL, 0);
- ops = vm_bind_ioctl_ops_create(vm, bo, 0, addr, bo->size,
+ ops = vm_bind_ioctl_ops_create(vm, &vops, bo, 0, addr, bo->size,
DRM_XE_VM_BIND_OP_MAP, 0, 0,
vm->xe->pat.idx[cache_lvl]);
if (IS_ERR(ops)) {
@@ -3620,6 +3843,68 @@ void xe_vm_unlock(struct xe_vm *vm)
}
/**
+ * xe_vm_range_tilemask_tlb_invalidation - Issue a TLB invalidation on this tilemask for an
+ * address range
+ * @vm: The VM
+ * @start: start address
+ * @end: end address
+ * @tile_mask: mask of tiles for which to issue TLB invalidations
+ *
+ * Issue a range-based TLB invalidation for the GTs on the tiles in @tile_mask.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask)
+{
+ struct xe_gt_tlb_invalidation_fence fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
+ struct xe_tile *tile;
+ u32 fence_id = 0;
+ u8 id;
+ int err;
+
+ if (!tile_mask)
+ return 0;
+
+ for_each_tile(tile, vm->xe, id) {
+ if (tile_mask & BIT(id)) {
+ xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->primary_gt,
+ &fence[fence_id],
+ start,
+ end,
+ vm->usm.asid);
+ if (err)
+ goto wait;
+ ++fence_id;
+
+ if (!tile->media_gt)
+ continue;
+
+ xe_gt_tlb_invalidation_fence_init(tile->media_gt,
+ &fence[fence_id], true);
+
+ err = xe_gt_tlb_invalidation_range(tile->media_gt,
+ &fence[fence_id],
+ start,
+ end,
+ vm->usm.asid);
+ if (err)
+ goto wait;
+ ++fence_id;
+ }
+ }
+
+wait:
+ for (id = 0; id < fence_id; ++id)
+ xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+
+ return err;
+}
+
+/**
* xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
* @vma: VMA to invalidate
*
@@ -3632,28 +3917,34 @@ void xe_vm_unlock(struct xe_vm *vm)
int xe_vm_invalidate_vma(struct xe_vma *vma)
{
struct xe_device *xe = xe_vma_vm(vma)->xe;
+ struct xe_vm *vm = xe_vma_vm(vma);
struct xe_tile *tile;
- struct xe_gt_tlb_invalidation_fence
- fence[XE_MAX_TILES_PER_DEVICE * XE_MAX_GT_PER_TILE];
- u8 id;
- u32 fence_id = 0;
+ u8 tile_mask = 0;
int ret = 0;
+ u8 id;
xe_assert(xe, !xe_vma_is_null(vma));
xe_assert(xe, !xe_vma_is_cpu_addr_mirror(vma));
trace_xe_vma_invalidate(vma);
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ vm_dbg(&vm->xe->drm,
"INVALIDATE: addr=0x%016llx, range=0x%016llx",
xe_vma_start(vma), xe_vma_size(vma));
- /* Check that we don't race with page-table updates */
+ /*
+ * Check that we don't race with page-table updates; the tile_invalidated
+ * update is safe.
+ */
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
if (xe_vma_is_userptr(vma)) {
+ lockdep_assert(lockdep_is_held_type(&vm->userptr.notifier_lock, 0) ||
+ (lockdep_is_held_type(&vm->userptr.notifier_lock, 1) &&
+ lockdep_is_held(&xe_vm_resv(vm)->lock.base)));
+
WARN_ON_ONCE(!mmu_interval_check_retry
(&to_userptr_vma(vma)->userptr.notifier,
to_userptr_vma(vma)->userptr.notifier_seq));
- WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)),
+ WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(vm),
DMA_RESV_USAGE_BOOKKEEP));
} else {
@@ -3661,39 +3952,17 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
}
}
- for_each_tile(tile, xe, id) {
- if (xe_pt_zap_ptes(tile, vma)) {
- xe_device_wmb(xe);
- xe_gt_tlb_invalidation_fence_init(tile->primary_gt,
- &fence[fence_id],
- true);
-
- ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
- &fence[fence_id], vma);
- if (ret)
- goto wait;
- ++fence_id;
-
- if (!tile->media_gt)
- continue;
-
- xe_gt_tlb_invalidation_fence_init(tile->media_gt,
- &fence[fence_id],
- true);
+ for_each_tile(tile, xe, id)
+ if (xe_pt_zap_ptes(tile, vma))
+ tile_mask |= BIT(id);
- ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
- &fence[fence_id], vma);
- if (ret)
- goto wait;
- ++fence_id;
- }
- }
+ xe_device_wmb(xe);
-wait:
- for (id = 0; id < fence_id; ++id)
- xe_gt_tlb_invalidation_fence_wait(&fence[id]);
+ ret = xe_vm_range_tilemask_tlb_invalidation(xe_vma_vm(vma), xe_vma_start(vma),
+ xe_vma_end(vma), tile_mask);
- vma->tile_invalidated = vma->tile_mask;
+ /* WRITE_ONCE pairs with READ_ONCE in xe_vm_has_valid_gpu_mapping() */
+ WRITE_ONCE(vma->tile_invalidated, vma->tile_mask);
return ret;
}
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 494af6bdc646..3475a118f666 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -169,6 +169,8 @@ static inline bool xe_vma_is_userptr(struct xe_vma *vma)
!xe_vma_is_cpu_addr_mirror(vma);
}
+struct xe_vma *xe_vm_find_vma_by_addr(struct xe_vm *vm, u64 page_addr);
+
/**
* to_userptr_vma() - Return a pointer to an embedding userptr vma
* @vma: Pointer to the embedded struct xe_vma
@@ -226,6 +228,9 @@ struct dma_fence *xe_vm_range_rebind(struct xe_vm *vm,
struct dma_fence *xe_vm_range_unbind(struct xe_vm *vm,
struct xe_svm_range *range);
+int xe_vm_range_tilemask_tlb_invalidation(struct xe_vm *vm, u64 start,
+ u64 end, u8 tile_mask);
+
int xe_vm_invalidate_vma(struct xe_vma *vma);
int xe_vm_validate_protected(struct xe_vm *vm);
@@ -370,6 +375,25 @@ static inline bool xe_vm_is_validating(struct xe_vm *vm)
return false;
}
+/**
+ * xe_vm_has_valid_gpu_mapping() - Advisory helper to check if VMA or SVM range has
+ * a valid GPU mapping
+ * @tile: The tile which the GPU mapping belongs to
+ * @tile_present: Tile present mask
+ * @tile_invalidated: Tile invalidated mask
+ *
+ * The READ_ONCEs pair with WRITE_ONCEs in either the TLB invalidation paths
+ * (xe_vm.c, xe_svm.c) or the binding paths (xe_pt.c). These are not reliable
+ * without the notifier lock in userptr or SVM cases, and not reliable without
+ * the BO dma-resv lock in the BO case. As such, they should only be used in
+ * opportunistic cases (e.g., skipping a page fault fix or not skipping a TLB
+ * invalidation) where it is harmless.
+ *
+ * Return: True if there are valid GPU pages, False otherwise
+ */
+#define xe_vm_has_valid_gpu_mapping(tile, tile_present, tile_invalidated) \
+ ((READ_ONCE(tile_present) & ~READ_ONCE(tile_invalidated)) & BIT((tile)->id))
+
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
#else
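
As a usage illustration of the xe_vm_has_valid_gpu_mapping() helper documented above, here is a hedged sketch of an opportunistic caller. The wrapper function below is hypothetical; only the macro itself comes from this series:

/*
 * Illustrative only: an opportunistic check in the spirit of the kernel-doc
 * above. May race with bind/invalidation paths; that is acceptable here
 * because a stale answer only skips work that a later fault or invalidation
 * will redo.
 */
static bool vma_mapping_likely_valid(struct xe_tile *tile, struct xe_vma *vma)
{
	return xe_vm_has_valid_gpu_mapping(tile, vma->tile_present,
					   vma->tile_invalidated);
}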
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 1979e9bdbdf3..bed6088e1bb3 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -100,14 +100,21 @@ struct xe_vma {
struct work_struct destroy_work;
};
- /** @tile_invalidated: VMA has been invalidated */
+ /**
+ * @tile_invalidated: Tile mask of bindings that are invalidated for this
+ * VMA. Protected by the BO's resv and, for userptrs, vm->userptr.notifier_lock
+ * in write mode for writing, or vm->userptr.notifier_lock in read mode and
+ * the vm->resv. For stable reading, the BO's resv or, for userptrs,
+ * vm->userptr.notifier_lock in read mode is required. Can be
+ * opportunistically read with READ_ONCE outside of locks.
+ */
u8 tile_invalidated;
/** @tile_mask: Tile mask of where to create binding for this VMA */
u8 tile_mask;
/**
- * @tile_present: GT mask of binding are present for this VMA.
+ * @tile_present: Tile mask of bindings that are present for this VMA.
* protected by vm->lock, vm->resv and for userptrs,
* vm->userptr.notifier_lock for writing. Needs either for reading,
* but if reading is done under the vm->lock only, it needs to be held
@@ -382,6 +389,16 @@ struct xe_vma_op_unmap_range {
struct xe_svm_range *range;
};
+/** struct xe_vma_op_prefetch_range - VMA prefetch range operation */
+struct xe_vma_op_prefetch_range {
+ /** @range: xarray for SVM ranges data */
+ struct xarray range;
+ /** @ranges_count: number of svm ranges to map */
+ u32 ranges_count;
+ /** @region: memory region to prefetch to */
+ u32 region;
+};
+
/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
/** @XE_VMA_OP_COMMITTED: VMA operation committed */
@@ -424,6 +441,8 @@ struct xe_vma_op {
struct xe_vma_op_map_range map_range;
/** @unmap_range: VMA unmap range operation specific data */
struct xe_vma_op_unmap_range unmap_range;
+ /** @prefetch_range: VMA prefetch range operation specific data */
+ struct xe_vma_op_prefetch_range prefetch_range;
};
};
@@ -441,6 +460,9 @@ struct xe_vma_ops {
u32 num_syncs;
/** @pt_update_ops: page table update operations */
struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
+ /** @flags: flags to signify the properties within &xe_vma_ops */
+#define XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH BIT(0)
+ u32 flags;
#ifdef TEST_VM_OPS_ERROR
/** @inject_error: inject error to test error handling */
bool inject_error;
diff --git a/drivers/gpu/drm/xe/xe_vsec.c b/drivers/gpu/drm/xe/xe_vsec.c
index b378848d3b7b..3e573b0b7ebd 100644
--- a/drivers/gpu/drm/xe/xe_vsec.c
+++ b/drivers/gpu/drm/xe/xe_vsec.c
@@ -149,8 +149,8 @@ static int xe_guid_decode(u32 guid, int *index, u32 *offset)
return 0;
}
-static int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset,
- u32 count)
+int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset,
+ u32 count)
{
struct xe_device *xe = pdev_to_xe_device(pdev);
void __iomem *telem_addr = xe->mmio.regs + BMG_TELEMETRY_OFFSET;
diff --git a/drivers/gpu/drm/xe/xe_vsec.h b/drivers/gpu/drm/xe/xe_vsec.h
index 5777c53faec2..dabfb4e02d70 100644
--- a/drivers/gpu/drm/xe/xe_vsec.h
+++ b/drivers/gpu/drm/xe/xe_vsec.h
@@ -4,8 +4,12 @@
#ifndef _XE_VSEC_H_
#define _XE_VSEC_H_
+#include <linux/types.h>
+
+struct pci_dev;
struct xe_device;
void xe_vsec_init(struct xe_device *xe);
+int xe_pmt_telem_read(struct pci_dev *pdev, u32 guid, u64 *data, loff_t user_offset, u32 count);
#endif
diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c
index 67196baa4249..4a76de391abb 100644
--- a/drivers/gpu/drm/xe/xe_wa.c
+++ b/drivers/gpu/drm/xe/xe_wa.c
@@ -503,10 +503,6 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, ENABLE_SMP_LD_RENDER_SURFACE_CONTROL))
},
- { XE_RTP_NAME("16018737384"),
- XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
- XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS))
- },
/*
* These two workarounds are the same, just applying to different
* engines. Although Wa_18032095049 (for the RCS) isn't required on
@@ -533,31 +529,38 @@ static const struct xe_rtp_entry_sr engine_was[] = {
/* Xe2_HPG */
{ XE_RTP_NAME("16018712365"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, XE2_ALLOC_DPA_STARVE_FIX_DIS))
},
{ XE_RTP_NAME("16018737384"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, XE_RTP_END_VERSION_UNDEFINED),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN, EARLY_EOT_DIS))
},
{ XE_RTP_NAME("14019988906"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FLSH_IGNORES_PSD))
},
{ XE_RTP_NAME("14019877138"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(XEHP_PSS_CHICKEN, FD_END_COLLECT))
},
{ XE_RTP_NAME("14020338487"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(ROW_CHICKEN3, XE2_EUPEND_CHK_FLUSH_DIS))
},
{ XE_RTP_NAME("18032247524"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, SEQUENTIAL_ACCESS_UPGRADE_DISABLE))
},
{ XE_RTP_NAME("14018471104"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0_UDW, ENABLE_SMP_LD_RENDER_SURFACE_CONTROL))
},
/*
@@ -566,7 +569,7 @@ static const struct xe_rtp_entry_sr engine_was[] = {
* apply this to all engines for simplicity.
*/
{ XE_RTP_NAME("16021639441"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002)),
XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0),
GHWSP_CSB_REPORT_DIS |
PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS,
@@ -578,11 +581,12 @@ static const struct xe_rtp_entry_sr engine_was[] = {
XE_RTP_ACTIONS(SET(LSC_CHICKEN_BIT_0, WR_REQ_CHAINING_DIS))
},
{ XE_RTP_NAME("14021402888"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
},
- { XE_RTP_NAME("14021821874"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+ { XE_RTP_NAME("14021821874, 14022954250"),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002),
+ FUNC(xe_rtp_match_first_render_or_compute)),
XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, STK_ID_RESTRICT))
},
@@ -774,7 +778,7 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_ACTIONS(SET(INSTPM(RENDER_RING_BASE), ENABLE_SEMAPHORE_POLL_BIT))
},
{ XE_RTP_NAME("18033852989"),
- XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2004), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN1, DISABLE_BOTTOM_CLIP_RECTANGLE_TEST))
},
{ XE_RTP_NAME("14021567978"),
@@ -807,7 +811,7 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN))
},
{ XE_RTP_NAME("14019386621"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(VF_SCRATCHPAD, XE2_VFG_TED_CREDIT_INTERFACE_DISABLE))
},
{ XE_RTP_NAME("14020756599"),
@@ -824,13 +828,17 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
DIS_AUTOSTRIP))
},
{ XE_RTP_NAME("15016589081"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
},
{ XE_RTP_NAME("22021007897"),
- XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_RULES(GRAPHICS_VERSION_RANGE(2001, 2002), ENGINE_CLASS(RENDER)),
XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN4, SBE_PUSH_CONSTANT_BEHIND_FIX_ENABLE))
},
+ { XE_RTP_NAME("18033852989"),
+ XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
+ XE_RTP_ACTIONS(SET(COMMON_SLICE_CHICKEN1, DISABLE_BOTTOM_CLIP_RECTANGLE_TEST))
+ },
/* Xe3_LPG */
{ XE_RTP_NAME("14021490052"),
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 9efc5accd43d..0ee74a5b2407 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -21,7 +21,8 @@
GRAPHICS_VERSION_RANGE(1270, 1274)
MEDIA_VERSION(1300)
PLATFORM(DG2)
-14018094691 GRAPHICS_VERSION(2004)
+14018094691 GRAPHICS_VERSION_RANGE(2001, 2002)
+ GRAPHICS_VERSION(2004)
14019882105 GRAPHICS_VERSION(2004), GRAPHICS_STEP(A0, B0)
18024947630 GRAPHICS_VERSION(2001)
GRAPHICS_VERSION(2004)
@@ -30,10 +31,10 @@
GRAPHICS_VERSION(2004)
13011645652 GRAPHICS_VERSION(2004)
GRAPHICS_VERSION(3001)
-14022293748 GRAPHICS_VERSION(2001)
+14022293748 GRAPHICS_VERSION_RANGE(2001, 2002)
GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
-22019794406 GRAPHICS_VERSION(2001)
+22019794406 GRAPHICS_VERSION_RANGE(2001, 2002)
GRAPHICS_VERSION(2004)
GRAPHICS_VERSION_RANGE(3000, 3001)
22019338487 MEDIA_VERSION(2000)
@@ -59,3 +60,7 @@ no_media_l3 MEDIA_VERSION(3000)
MEDIA_VERSION_RANGE(1301, 3000)
16026508708 GRAPHICS_VERSION_RANGE(1200, 3001)
MEDIA_VERSION_RANGE(1300, 3000)
+
+# SoC workaround - currently applies to all platforms with the following
+# primary GT GMDID
+14022085890 GRAPHICS_VERSION(2001)
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 238cbb49963e..02e1feaa6115 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -2439,9 +2439,9 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
struct zynqmp_dp *dp;
int ret;
- dp = kzalloc(sizeof(*dp), GFP_KERNEL);
- if (!dp)
- return -ENOMEM;
+ dp = devm_drm_bridge_alloc(&pdev->dev, struct zynqmp_dp, bridge, &zynqmp_dp_bridge_funcs);
+ if (IS_ERR(dp))
+ return PTR_ERR(dp);
dp->dev = &pdev->dev;
dp->dpsub = dpsub;
@@ -2454,31 +2454,25 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
/* Acquire all resources (IOMEM, IRQ and PHYs). */
dp->iomem = devm_platform_ioremap_resource_byname(pdev, "dp");
- if (IS_ERR(dp->iomem)) {
- ret = PTR_ERR(dp->iomem);
- goto err_free;
- }
+ if (IS_ERR(dp->iomem))
+ return PTR_ERR(dp->iomem);
dp->irq = platform_get_irq(pdev, 0);
- if (dp->irq < 0) {
- ret = dp->irq;
- goto err_free;
- }
+ if (dp->irq < 0)
+ return dp->irq;
dp->reset = devm_reset_control_get(dp->dev, NULL);
- if (IS_ERR(dp->reset)) {
- ret = dev_err_probe(dp->dev, PTR_ERR(dp->reset),
+ if (IS_ERR(dp->reset))
+ return dev_err_probe(dp->dev, PTR_ERR(dp->reset),
"failed to get reset\n");
- goto err_free;
- }
ret = zynqmp_dp_reset(dp, true);
if (ret < 0)
- goto err_free;
+ return ret;
ret = zynqmp_dp_reset(dp, false);
if (ret < 0)
- goto err_free;
+ return ret;
ret = zynqmp_dp_phy_probe(dp);
if (ret)
@@ -2486,7 +2480,6 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub)
/* Initialize the bridge. */
bridge = &dp->bridge;
- bridge->funcs = &zynqmp_dp_bridge_funcs;
bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
| DRM_BRIDGE_OP_HPD;
bridge->type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -2539,8 +2532,6 @@ err_phy_exit:
zynqmp_dp_phy_exit(dp);
err_reset:
zynqmp_dp_reset(dp, true);
-err_free:
- kfree(dp);
return ret;
}
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
index 3a9544b97bc5..2764c4b17c5e 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c
@@ -180,7 +180,6 @@ static int zynqmp_dpsub_parse_dt(struct zynqmp_dpsub *dpsub)
void zynqmp_dpsub_release(struct zynqmp_dpsub *dpsub)
{
kfree(dpsub->disp);
- kfree(dpsub->dp);
kfree(dpsub);
}
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 8ccb483204fa..73747d20df85 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -152,8 +152,8 @@ static __always_inline int __intel_idle(struct cpuidle_device *dev,
int index, bool irqoff)
{
struct cpuidle_state *state = &drv->states[index];
- unsigned long eax = flg2MWAIT(state->flags);
- unsigned long ecx = 1*irqoff; /* break on interrupt flag */
+ unsigned int eax = flg2MWAIT(state->flags);
+ unsigned int ecx = 1*irqoff; /* break on interrupt flag */
mwait_idle_with_hints(eax, ecx);
@@ -226,9 +226,9 @@ static __cpuidle int intel_idle_xstate(struct cpuidle_device *dev,
static __cpuidle int intel_idle_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
- unsigned long ecx = 1; /* break on interrupt flag */
struct cpuidle_state *state = &drv->states[index];
- unsigned long eax = flg2MWAIT(state->flags);
+ unsigned int eax = flg2MWAIT(state->flags);
+ unsigned int ecx = 1; /* break on interrupt flag */
if (state->flags & CPUIDLE_FLAG_INIT_XSTATE)
fpu_idle_fpregs();
@@ -2507,6 +2507,8 @@ static int __init intel_idle_init(void)
pr_debug("Local APIC timer is reliable in %s\n",
boot_cpu_has(X86_FEATURE_ARAT) ? "all C-states" : "C1");
+ arch_cpu_rescan_dead_smt_siblings();
+
return 0;
hp_setup_fail:
@@ -2518,7 +2520,7 @@ init_driver_fail:
return retval;
}
-device_initcall(intel_idle_init);
+subsys_initcall_sync(intel_idle_init);
/*
* We are not really modular, but we used to support that. Meaning we also
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 61897d50162d..e58fe9d8b9e7 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -559,11 +559,11 @@ static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,
{
unsigned int pd_index = iova_pd_index(iova);
struct tegra_smmu *smmu = as->smmu;
- struct tegra_pd *pd = as->pd;
+ u32 *pd = &as->pd->val[pd_index];
unsigned long offset = pd_index * sizeof(*pd);
/* Set the page directory entry first */
- pd->val[pd_index] = value;
+ *pd = value;
/* The flush the page directory entry from caches */
dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset,
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 862bdccb7439..dc2f4adac9bc 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -2034,9 +2034,6 @@ int b53_br_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge,
b53_get_vlan_entry(dev, pvid, vl);
vl->members &= ~BIT(port);
- if (vl->members == BIT(cpu_port))
- vl->members &= ~BIT(cpu_port);
- vl->untag = vl->members;
b53_set_vlan_entry(dev, pvid, vl);
}
@@ -2115,8 +2112,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge)
}
b53_get_vlan_entry(dev, pvid, vl);
- vl->members |= BIT(port) | BIT(cpu_port);
- vl->untag |= BIT(port) | BIT(cpu_port);
+ vl->members |= BIT(port);
b53_set_vlan_entry(dev, pvid, vl);
}
}
diff --git a/drivers/net/ethernet/airoha/airoha_regs.h b/drivers/net/ethernet/airoha/airoha_regs.h
index 04187eb40ec6..150c85995cc1 100644
--- a/drivers/net/ethernet/airoha/airoha_regs.h
+++ b/drivers/net/ethernet/airoha/airoha_regs.h
@@ -614,8 +614,9 @@
RX19_DONE_INT_MASK | RX18_DONE_INT_MASK | \
RX17_DONE_INT_MASK | RX16_DONE_INT_MASK)
-#define RX_DONE_INT_MASK (RX_DONE_HIGH_INT_MASK | RX_DONE_LOW_INT_MASK)
#define RX_DONE_HIGH_OFFSET fls(RX_DONE_HIGH_INT_MASK)
+#define RX_DONE_INT_MASK \
+ ((RX_DONE_HIGH_INT_MASK << RX_DONE_HIGH_OFFSET) | RX_DONE_LOW_INT_MASK)
#define INT_RX2_MASK(_n) \
((RX_NO_CPU_DSCP_HIGH_INT_MASK & (_n)) | \
diff --git a/drivers/net/ethernet/freescale/enetc/Kconfig b/drivers/net/ethernet/freescale/enetc/Kconfig
index e917132d3714..54b0f0a5a6bb 100644
--- a/drivers/net/ethernet/freescale/enetc/Kconfig
+++ b/drivers/net/ethernet/freescale/enetc/Kconfig
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config FSL_ENETC_CORE
tristate
+ select NXP_NETC_LIB if NXP_NTMP
help
This module supports common functionality between the PF and VF
drivers for the NXP ENETC controller.
@@ -22,6 +23,9 @@ config NXP_NETC_LIB
Switch, such as NETC Table Management Protocol (NTMP) 2.0, common tc
flower and debugfs interfaces and so on.
+config NXP_NTMP
+ bool
+
config FSL_ENETC
tristate "ENETC PF driver"
depends on PCI_MSI
@@ -45,7 +49,7 @@ config NXP_ENETC4
select FSL_ENETC_CORE
select FSL_ENETC_MDIO
select NXP_ENETC_PF_COMMON
- select NXP_NETC_LIB
+ select NXP_NTMP
select PHYLINK
select DIMLIB
help
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 3f089c3d47b2..d8595e84326d 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -477,10 +477,6 @@ static void e1000_down_and_stop(struct e1000_adapter *adapter)
cancel_delayed_work_sync(&adapter->phy_info_task);
cancel_delayed_work_sync(&adapter->fifo_stall_task);
-
- /* Only kill reset task if adapter is not resetting */
- if (!test_bit(__E1000_RESETTING, &adapter->flags))
- cancel_work_sync(&adapter->reset_task);
}
void e1000_down(struct e1000_adapter *adapter)
@@ -1266,6 +1262,10 @@ static void e1000_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
+ /* Only kill reset task if adapter is not resetting */
+ if (!test_bit(__E1000_RESETTING, &adapter->flags))
+ cancel_work_sync(&adapter->reset_task);
+
e1000_phy_hw_reset(hw);
kfree(adapter->tx_ring);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 1120f8e4bb67..88e6bef69342 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1546,8 +1546,8 @@ static void i40e_cleanup_reset_vf(struct i40e_vf *vf)
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
*
- * Returns true if the VF is in reset, resets successfully, or resets
- * are disabled and false otherwise.
+ * Return: True if reset was performed successfully or if resets are disabled.
+ * False if reset is already in progress.
**/
bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
@@ -1566,7 +1566,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
/* If VF is being reset already we don't need to continue. */
if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states))
- return true;
+ return false;
i40e_trigger_vf_reset(vf, flr);
@@ -4328,7 +4328,10 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
if (reg & BIT(bit_idx))
/* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
- i40e_reset_vf(vf, true);
+ if (!i40e_reset_vf(vf, true)) {
+ /* At least one VF did not finish resetting, retry next time */
+ set_bit(__I40E_VFLR_EVENT_PENDING, pf->state);
+ }
}
return 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 2c0bb41809a4..81d7249d1149 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3209,6 +3209,17 @@ static void iavf_reset_task(struct work_struct *work)
}
continue_reset:
+ /* If we are still early in the state machine, just restart. */
+ if (adapter->state <= __IAVF_INIT_FAILED) {
+ iavf_shutdown_adminq(hw);
+ iavf_change_state(adapter, __IAVF_STARTUP);
+ iavf_startup(adapter);
+ queue_delayed_work(adapter->wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ netdev_unlock(netdev);
+ return;
+ }
+
/* We don't use netif_running() because it may be true prior to
* ndo_open() returning, so we can't assume it means all our open
* tasks have finished, since we're not holding the rtnl_lock here.
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index a6f0e5990be2..07f0d0a0f1e2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -79,6 +79,23 @@ iavf_poll_virtchnl_msg(struct iavf_hw *hw, struct iavf_arq_event_info *event,
return iavf_status_to_errno(status);
received_op =
(enum virtchnl_ops)le32_to_cpu(event->desc.cookie_high);
+
+ if (received_op == VIRTCHNL_OP_EVENT) {
+ struct iavf_adapter *adapter = hw->back;
+ struct virtchnl_pf_event *vpe =
+ (struct virtchnl_pf_event *)event->msg_buf;
+
+ if (vpe->event != VIRTCHNL_EVENT_RESET_IMPENDING)
+ continue;
+
+ dev_info(&adapter->pdev->dev, "Reset indication received from the PF\n");
+ if (!(adapter->flags & IAVF_FLAG_RESET_PENDING))
+ iavf_schedule_reset(adapter,
+ IAVF_FLAG_RESET_PENDING);
+
+ return -EIO;
+ }
+
if (op_to_poll == received_op)
break;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index b79a148ed0f2..55cad824c5b9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -2299,6 +2299,7 @@ static int ice_capture_crosststamp(ktime_t *device,
ts = ((u64)ts_hi << 32) | ts_lo;
system->cycles = ts;
system->cs_id = CSID_X86_ART;
+ system->use_nsecs = true;
/* Read Device source clock time */
ts_lo = rd32(hw, cfg->dev_time_l[tmr_idx]);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index ea078c9f5d15..3cb8d3bf9044 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -43,7 +43,6 @@
#include "en/fs_ethtool.h"
#define LANES_UNKNOWN 0
-#define MAX_LANES 8
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
@@ -1098,10 +1097,8 @@ static void get_link_properties(struct net_device *netdev,
speed = info->speed;
lanes = info->lanes;
duplex = DUPLEX_FULL;
- } else if (data_rate_oper) {
+ } else if (data_rate_oper)
speed = 100 * data_rate_oper;
- lanes = MAX_LANES;
- }
out:
link_ksettings->base.duplex = duplex;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index f1d908f61134..fef418e1ed1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2028,9 +2028,8 @@ err_out:
return err;
}
-static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
+static bool mlx5_flow_has_geneve_opt(struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
void *headers_v = MLX5_ADDR_OF(fte_match_param,
spec->match_value,
misc_parameters_3);
@@ -2069,7 +2068,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
}
complete_all(&flow->del_hw_done);
- if (mlx5_flow_has_geneve_opt(flow))
+ if (mlx5_flow_has_geneve_opt(&attr->parse_attr->spec))
mlx5_geneve_tlv_option_del(priv->mdev->geneve);
if (flow->decap_route)
@@ -2574,12 +2573,13 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
if (err) {
- kvfree(tmp_spec);
NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
- return err;
+ } else {
+ err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
}
- err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
+ if (mlx5_flow_has_geneve_opt(tmp_spec))
+ mlx5_geneve_tlv_option_del(priv->mdev->geneve);
kvfree(tmp_spec);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 7fb8a3381f84..4917d185d0c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1295,12 +1295,15 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_ECPF, enabled_events);
if (ret)
goto ecpf_err;
- if (mlx5_core_ec_sriov_enabled(esw->dev)) {
- ret = mlx5_eswitch_load_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs,
- enabled_events);
- if (ret)
- goto ec_vf_err;
- }
+ }
+
+ /* Enable ECVF vports */
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ ret = mlx5_eswitch_load_ec_vf_vports(esw,
+ esw->esw_funcs.num_ec_vfs,
+ enabled_events);
+ if (ret)
+ goto ec_vf_err;
}
/* Enable VF vports */
@@ -1331,9 +1334,11 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
+ if (mlx5_core_ec_sriov_enabled(esw->dev))
+ mlx5_eswitch_unload_ec_vf_vports(esw,
+ esw->esw_funcs.num_ec_vfs);
+
if (mlx5_ecpf_vport_exists(esw->dev)) {
- if (mlx5_core_ec_sriov_enabled(esw->dev))
- mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_vfs);
mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 23a7e8e7adfa..a8046200d376 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2228,6 +2228,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct mlx5_flow_handle *rule;
struct match_list *iter;
bool take_write = false;
+ bool try_again = false;
struct fs_fte *fte;
u64 version = 0;
int err;
@@ -2292,6 +2293,7 @@ skip_search:
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
if (!g->node.active) {
+ try_again = true;
up_write_ref_node(&g->node, false);
continue;
}
@@ -2313,7 +2315,8 @@ skip_search:
tree_put_node(&fte->node, false);
return rule;
}
- rule = ERR_PTR(-ENOENT);
+ err = try_again ? -EAGAIN : -ENOENT;
+ rule = ERR_PTR(err);
out:
kmem_cache_free(steering->ftes_cache, fte);
return rule;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 972e8e9df585..9bc9bd83c232 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -291,7 +291,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function)
static int alloc_system_page(struct mlx5_core_dev *dev, u32 function)
{
struct device *device = mlx5_core_dma_dev(dev);
- int nid = dev_to_node(device);
+ int nid = dev->priv.numa_node;
struct page *page;
u64 zero_addr = 1;
u64 addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
index fb62f3bc4bd4..447ea3f8722c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/action.c
@@ -1370,8 +1370,8 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
struct mlx5hws_cmd_set_fte_attr fte_attr = {0};
struct mlx5hws_cmd_forward_tbl *fw_island;
struct mlx5hws_action *action;
- u32 i /*, packet_reformat_id*/;
- int ret;
+ int ret, last_dest_idx = -1;
+ u32 i;
if (num_dest <= 1) {
mlx5hws_err(ctx, "Action must have multiple dests\n");
@@ -1401,11 +1401,8 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
dest_list[i].destination_id = dests[i].dest->dest_obj.obj_id;
fte_attr.action_flags |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
fte_attr.ignore_flow_level = ignore_flow_level;
- /* ToDo: In SW steering we have a handling of 'go to WIRE'
- * destination here by upper layer setting 'is_wire_ft' flag
- * if the destination is wire.
- * This is because uplink should be last dest in the list.
- */
+ if (dests[i].is_wire_ft)
+ last_dest_idx = i;
break;
case MLX5HWS_ACTION_TYP_VPORT:
dest_list[i].destination_type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
@@ -1429,6 +1426,9 @@ mlx5hws_action_create_dest_array(struct mlx5hws_context *ctx,
}
}
+ if (last_dest_idx != -1)
+ swap(dest_list[last_dest_idx], dest_list[num_dest - 1]);
+
fte_attr.dests_num = num_dest;
fte_attr.dests = dest_list;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
index 70768953a4f6..ca7501c57468 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/bwc_complex.c
@@ -1070,7 +1070,7 @@ hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule,
struct mlx5hws_bwc_matcher *bwc_matcher = bwc_rule->bwc_matcher;
struct mlx5hws_bwc_complex_rule_hash_node *node, *old_node;
struct rhashtable *refcount_hash;
- int i;
+ int ret, i;
bwc_rule->complex_hash_node = NULL;
@@ -1078,7 +1078,11 @@ hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule,
if (unlikely(!node))
return -ENOMEM;
- node->tag = ida_alloc(&bwc_matcher->complex->metadata_ida, GFP_KERNEL);
+ ret = ida_alloc(&bwc_matcher->complex->metadata_ida, GFP_KERNEL);
+ if (ret < 0)
+ goto err_free_node;
+ node->tag = ret;
+
refcount_set(&node->refcount, 1);
/* Clear match buffer - turn off all the unrelated fields
@@ -1094,6 +1098,11 @@ hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule,
old_node = rhashtable_lookup_get_insert_fast(refcount_hash,
&node->hash_node,
hws_refcount_hash);
+ if (IS_ERR(old_node)) {
+ ret = PTR_ERR(old_node);
+ goto err_free_ida;
+ }
+
if (old_node) {
/* Rule with the same tag already exists - update refcount */
refcount_inc(&old_node->refcount);
@@ -1112,6 +1121,12 @@ hws_bwc_rule_complex_hash_node_get(struct mlx5hws_bwc_rule *bwc_rule,
bwc_rule->complex_hash_node = node;
return 0;
+
+err_free_ida:
+ ida_free(&bwc_matcher->complex->metadata_ida, node->tag);
+err_free_node:
+ kfree(node);
+ return ret;
}
static void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
index 5cc0dc002ac1..d45e1145d197 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/definer.c
@@ -785,6 +785,9 @@ hws_definer_conv_outer(struct mlx5hws_definer_conv_data *cd,
HWS_SET_HDR(fc, match_param, IP_PROTOCOL_O,
outer_headers.ip_protocol,
eth_l3_outer.protocol_next_header);
+ HWS_SET_HDR(fc, match_param, IP_VERSION_O,
+ outer_headers.ip_version,
+ eth_l3_outer.ip_version);
HWS_SET_HDR(fc, match_param, IP_TTL_O,
outer_headers.ttl_hoplimit,
eth_l3_outer.time_to_live_hop_limit);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
index 9d1c0e4b224a..bf4643d0ce17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/fs_hws.c
@@ -966,6 +966,9 @@ static int mlx5_fs_fte_get_hws_actions(struct mlx5_flow_root_namespace *ns,
switch (attr->type) {
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
dest_action = mlx5_fs_get_dest_action_ft(fs_ctx, dst);
+ if (dst->dest_attr.ft->flags &
+ MLX5_FLOW_TABLE_UPLINK_VPORT)
+ dest_actions[num_dest_actions].is_wire_ft = true;
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
dest_action = mlx5_fs_get_dest_action_table_num(fs_ctx,
@@ -1357,6 +1360,7 @@ mlx5_cmd_hws_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
pkt_reformat->fs_hws_action.pr_data = pr_data;
}
+ mutex_init(&pkt_reformat->fs_hws_action.lock);
pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_HWS;
pkt_reformat->fs_hws_action.hws_action = hws_action;
return 0;
@@ -1503,7 +1507,6 @@ static int mlx5_cmd_hws_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
err = -ENOMEM;
goto release_mh;
}
- mutex_init(&modify_hdr->fs_hws_action.lock);
modify_hdr->fs_hws_action.mh_data = mh_data;
modify_hdr->fs_hws_action.fs_pool = pool;
modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
index 9bbadc4d8a0b..d8ac6c196211 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/hws/mlx5hws.h
@@ -213,6 +213,7 @@ struct mlx5hws_action_dest_attr {
struct mlx5hws_action *dest;
/* Optional reformat action */
struct mlx5hws_action *reformat;
+ bool is_wire_ft;
};
/**
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 3d315e30ee47..7edbe76b5455 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -247,15 +247,39 @@ static sci_t make_sci(const u8 *addr, __be16 port)
return sci;
}
-static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
+static sci_t macsec_active_sci(struct macsec_secy *secy)
{
- sci_t sci;
+ struct macsec_rx_sc *rx_sc = rcu_dereference_bh(secy->rx_sc);
+
+ /* Case single RX SC */
+ if (rx_sc && !rcu_dereference_bh(rx_sc->next))
+ return (rx_sc->active) ? rx_sc->sci : 0;
+ /* Case no RX SC or multiple */
+ else
+ return 0;
+}
+
+static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present,
+ struct macsec_rxh_data *rxd)
+{
+ struct macsec_dev *macsec;
+ sci_t sci = 0;
- if (sci_present)
+ /* SC = 1 */
+ if (sci_present) {
memcpy(&sci, hdr->secure_channel_id,
sizeof(hdr->secure_channel_id));
- else
+ /* SC = 0; ES = 0 */
+ } else if ((!(hdr->tci_an & (MACSEC_TCI_ES | MACSEC_TCI_SC))) &&
+ (list_is_singular(&rxd->secys))) {
+ /* Only one SECY should exist in this scenario */
+ macsec = list_first_or_null_rcu(&rxd->secys, struct macsec_dev,
+ secys);
+ if (macsec)
+ return macsec_active_sci(&macsec->secy);
+ } else {
sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);
+ }
return sci;
}
@@ -1109,7 +1133,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
struct macsec_rxh_data *rxd;
struct macsec_dev *macsec;
unsigned int len;
- sci_t sci;
+ sci_t sci = 0;
u32 hdr_pn;
bool cbit;
struct pcpu_rx_sc_stats *rxsc_stats;
@@ -1156,11 +1180,14 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
- sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);
rcu_read_lock();
rxd = macsec_data_rcu(skb->dev);
+ sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci, rxd);
+ if (!sci)
+ goto drop_nosc;
+
list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);
@@ -1283,6 +1310,7 @@ drop:
macsec_rxsa_put(rx_sa);
drop_nosa:
macsec_rxsc_put(rx_sc);
+drop_nosc:
rcu_read_unlock();
drop_direct:
kfree_skb(skb);
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 4289ccd3e41b..176935a8645f 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -1252,7 +1252,6 @@ static int sysdata_append_release(struct netconsole_target *nt, int offset)
*/
static int prepare_extradata(struct netconsole_target *nt)
{
- u32 fields = SYSDATA_CPU_NR | SYSDATA_TASKNAME;
int extradata_len;
/* userdata was appended when configfs write helper was called
@@ -1260,7 +1259,7 @@ static int prepare_extradata(struct netconsole_target *nt)
*/
extradata_len = nt->userdata_length;
- if (!(nt->sysdata_fields & fields))
+ if (!nt->sysdata_fields)
goto out;
if (nt->sysdata_fields & SYSDATA_CPU_NR)
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index af545d42961c..fa5fbd97ad69 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -371,7 +371,8 @@ static int nsim_poll(struct napi_struct *napi, int budget)
int done;
done = nsim_rcv(rq, budget);
- napi_complete(napi);
+ if (done < budget)
+ napi_complete_done(napi, done);
return done;
}
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index a6bcb0fee863..fda2e27c1810 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -445,6 +445,9 @@ int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->read)
retval = bus->read(bus, addr, regnum);
else
@@ -474,6 +477,9 @@ int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->write)
err = bus->write(bus, addr, regnum, val);
else
@@ -535,6 +541,9 @@ int __mdiobus_c45_read(struct mii_bus *bus, int addr, int devad, u32 regnum)
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->read_c45)
retval = bus->read_c45(bus, addr, devad, regnum);
else
@@ -566,6 +575,9 @@ int __mdiobus_c45_write(struct mii_bus *bus, int addr, int devad, u32 regnum,
lockdep_assert_held_once(&bus->mdio_lock);
+ if (addr >= PHY_MAX_ADDR)
+ return -ENXIO;
+
if (bus->write_c45)
err = bus->write_c45(bus, addr, devad, regnum, val);
else
diff --git a/drivers/net/phy/phy_caps.c b/drivers/net/phy/phy_caps.c
index 703321689726..38417e288611 100644
--- a/drivers/net/phy/phy_caps.c
+++ b/drivers/net/phy/phy_caps.c
@@ -188,6 +188,9 @@ phy_caps_lookup_by_linkmode_rev(const unsigned long *linkmodes, bool fdx_only)
* When @exact is not set, we return either an exact match, or matching capabilities
* at lower speed, or the lowest matching speed, or NULL.
*
+ * Non-exact matches will try to return an exact speed and duplex match, but may
+ * return matching capabilities with the same speed but a different duplex.
+ *
* Returns: a matched link_capabilities according to the above process, NULL
* otherwise.
*/
@@ -195,7 +198,7 @@ const struct link_capabilities *
phy_caps_lookup(int speed, unsigned int duplex, const unsigned long *supported,
bool exact)
{
- const struct link_capabilities *lcap, *last = NULL;
+ const struct link_capabilities *lcap, *match = NULL, *last = NULL;
for_each_link_caps_desc_speed(lcap) {
if (linkmode_intersects(lcap->linkmodes, supported)) {
@@ -204,16 +207,19 @@ phy_caps_lookup(int speed, unsigned int duplex, const unsigned long *supported,
if (lcap->speed == speed && lcap->duplex == duplex) {
return lcap;
} else if (!exact) {
- if (lcap->speed <= speed)
- return lcap;
+ if (!match && lcap->speed <= speed)
+ match = lcap;
+
+ if (lcap->speed < speed)
+ break;
}
}
}
- if (!exact)
- return last;
+ if (!match && !exact)
+ match = last;
- return NULL;
+ return match;
}
EXPORT_SYMBOL_GPL(phy_caps_lookup);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d6589b24c68d..44cba7acfe7d 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -10054,6 +10054,7 @@ static const struct usb_device_id rtl8152_table[] = {
{ USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041) },
{ USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff) },
{ USB_DEVICE(VENDOR_ID_TPLINK, 0x0601) },
+ { USB_DEVICE(VENDOR_ID_TPLINK, 0x0602) },
{ USB_DEVICE(VENDOR_ID_DLINK, 0xb301) },
{ USB_DEVICE(VENDOR_ID_DELL, 0xb097) },
{ USB_DEVICE(VENDOR_ID_ASUS, 0x1976) },
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index e58a0f1b5c5b..a3046142cb8e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -909,7 +909,7 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
/* NAPI functions as RCU section */
peer_dev = rcu_dereference_check(priv->peer, rcu_read_lock_bh_held());
- peer_txq = netdev_get_tx_queue(peer_dev, queue_idx);
+ peer_txq = peer_dev ? netdev_get_tx_queue(peer_dev, queue_idx) : NULL;
for (i = 0; i < budget; i++) {
void *ptr = __ptr_ring_consume(&rq->xdp_ring);
@@ -959,7 +959,7 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget,
rq->stats.vs.xdp_packets += done;
u64_stats_update_end(&rq->stats.syncp);
- if (unlikely(netif_tx_queue_stopped(peer_txq)))
+ if (peer_txq && unlikely(netif_tx_queue_stopped(peer_txq)))
netif_tx_wake_queue(peer_txq);
return done;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 8c7ffea0fa44..07fe05384cdf 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4,6 +4,7 @@
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/
#include "mac.h"
@@ -1022,6 +1023,26 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
return ar->last_wmi_vdev_start_status;
}
+static inline int ath10k_vdev_delete_sync(struct ath10k *ar)
+{
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map))
+ return 0;
+
+ if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
+ return -ESHUTDOWN;
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH10K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
struct cfg80211_chan_def *chandef = NULL;
@@ -5900,7 +5921,6 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = (void *)vif->drv_priv;
struct ath10k_peer *peer;
- unsigned long time_left;
int ret;
int i;
@@ -5940,13 +5960,10 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
arvif->vdev_id, ret);
- if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) {
- time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
- ATH10K_VDEV_DELETE_TIMEOUT_HZ);
- if (time_left == 0) {
- ath10k_warn(ar, "Timeout in receiving vdev delete response\n");
- goto out;
- }
+ ret = ath10k_vdev_delete_sync(ar);
+ if (ret) {
+ ath10k_warn(ar, "Error in receiving vdev delete response: %d\n", ret);
+ goto out;
}
/* Some firmware revisions don't notify host about self-peer removal
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index b2bf9d72b92f..d51f2e5a79a4 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -938,7 +938,9 @@ static int ath10k_snoc_hif_start(struct ath10k *ar)
dev_set_threaded(ar->napi_dev, true);
ath10k_core_napi_enable(ar);
- ath10k_snoc_irq_enable(ar);
+ /* IRQs are left enabled when we restart due to a firmware crash */
+ if (!test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags))
+ ath10k_snoc_irq_enable(ar);
ath10k_snoc_rx_post(ar);
clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags);
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 2e9f8a5e61e4..22a101136135 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -990,6 +990,7 @@ void ath11k_fw_stats_init(struct ath11k *ar)
INIT_LIST_HEAD(&ar->fw_stats.bcn);
init_completion(&ar->fw_stats_complete);
+ init_completion(&ar->fw_stats_done);
}
void ath11k_fw_stats_free(struct ath11k_fw_stats *stats)
@@ -2134,6 +2135,20 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
{
int ret;
+ switch (ath11k_crypto_mode) {
+ case ATH11K_CRYPT_MODE_SW:
+ set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ case ATH11K_CRYPT_MODE_HW:
+ clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
+ clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
+ break;
+ default:
+ ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode);
+ return -EINVAL;
+ }
+
ret = ath11k_core_start_firmware(ab, ab->fw_mode);
if (ret) {
ath11k_err(ab, "failed to start firmware: %d\n", ret);
@@ -2152,20 +2167,6 @@ int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
goto err_firmware_stop;
}
- switch (ath11k_crypto_mode) {
- case ATH11K_CRYPT_MODE_SW:
- set_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
- set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
- break;
- case ATH11K_CRYPT_MODE_HW:
- clear_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags);
- clear_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
- break;
- default:
- ath11k_info(ab, "invalid crypto_mode: %d\n", ath11k_crypto_mode);
- return -EINVAL;
- }
-
if (ath11k_frame_mode == ATH11K_HW_TXRX_RAW)
set_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags);
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 339d4fca1ed5..6b2f207975e3 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -600,6 +600,8 @@ struct ath11k_fw_stats {
struct list_head pdevs;
struct list_head vdevs;
struct list_head bcn;
+ u32 num_vdev_recvd;
+ u32 num_bcn_recvd;
};
struct ath11k_dbg_htt_stats {
@@ -784,7 +786,7 @@ struct ath11k {
u8 alpha2[REG_ALPHA2_LEN + 1];
struct ath11k_fw_stats fw_stats;
struct completion fw_stats_complete;
- bool fw_stats_done;
+ struct completion fw_stats_done;
/* protected by conf_mutex */
bool ps_state_enable;
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index bf192529e3fe..5d46f8e4c231 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -93,57 +93,14 @@ void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
spin_unlock_bh(&dbr_data->lock);
}
-static void ath11k_debugfs_fw_stats_reset(struct ath11k *ar)
-{
- spin_lock_bh(&ar->data_lock);
- ar->fw_stats_done = false;
- ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
- ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
- spin_unlock_bh(&ar->data_lock);
-}
-
void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats)
{
struct ath11k_base *ab = ar->ab;
- struct ath11k_pdev *pdev;
- bool is_end;
- static unsigned int num_vdev, num_bcn;
- size_t total_vdevs_started = 0;
- int i;
-
- /* WMI_REQUEST_PDEV_STAT request has been already processed */
-
- if (stats->stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
- ar->fw_stats_done = true;
- return;
- }
-
- if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
- if (list_empty(&stats->vdevs)) {
- ath11k_warn(ab, "empty vdev stats");
- return;
- }
- /* FW sends all the active VDEV stats irrespective of PDEV,
- * hence limit until the count of all VDEVs started
- */
- for (i = 0; i < ab->num_radios; i++) {
- pdev = rcu_dereference(ab->pdevs_active[i]);
- if (pdev && pdev->ar)
- total_vdevs_started += ar->num_started_vdevs;
- }
-
- is_end = ((++num_vdev) == total_vdevs_started);
-
- list_splice_tail_init(&stats->vdevs,
- &ar->fw_stats.vdevs);
-
- if (is_end) {
- ar->fw_stats_done = true;
- num_vdev = 0;
- }
- return;
- }
+ bool is_end = true;
+ /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_RSSI_PER_CHAIN_STAT and
+ * WMI_REQUEST_VDEV_STAT requests have been already processed.
+ */
if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
if (list_empty(&stats->bcn)) {
ath11k_warn(ab, "empty bcn stats");
@@ -152,97 +109,18 @@ void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *
/* Mark end until we reached the count of all started VDEVs
* within the PDEV
*/
- is_end = ((++num_bcn) == ar->num_started_vdevs);
+ if (ar->num_started_vdevs)
+ is_end = ((++ar->fw_stats.num_bcn_recvd) ==
+ ar->num_started_vdevs);
list_splice_tail_init(&stats->bcn,
&ar->fw_stats.bcn);
- if (is_end) {
- ar->fw_stats_done = true;
- num_bcn = 0;
- }
+ if (is_end)
+ complete(&ar->fw_stats_done);
}
}
-static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
- struct stats_request_params *req_param)
-{
- struct ath11k_base *ab = ar->ab;
- unsigned long timeout, time_left;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- /* FW stats can get split when exceeding the stats data buffer limit.
- * In that case, since there is no end marking for the back-to-back
- * received 'update stats' event, we keep a 3 seconds timeout in case,
- * fw_stats_done is not marked yet
- */
- timeout = jiffies + secs_to_jiffies(3);
-
- ath11k_debugfs_fw_stats_reset(ar);
-
- reinit_completion(&ar->fw_stats_complete);
-
- ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
-
- if (ret) {
- ath11k_warn(ab, "could not request fw stats (%d)\n",
- ret);
- return ret;
- }
-
- time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
-
- if (!time_left)
- return -ETIMEDOUT;
-
- for (;;) {
- if (time_after(jiffies, timeout))
- break;
-
- spin_lock_bh(&ar->data_lock);
- if (ar->fw_stats_done) {
- spin_unlock_bh(&ar->data_lock);
- break;
- }
- spin_unlock_bh(&ar->data_lock);
- }
- return 0;
-}
-
-int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
- u32 vdev_id, u32 stats_id)
-{
- struct ath11k_base *ab = ar->ab;
- struct stats_request_params req_param;
- int ret;
-
- mutex_lock(&ar->conf_mutex);
-
- if (ar->state != ATH11K_STATE_ON) {
- ret = -ENETDOWN;
- goto err_unlock;
- }
-
- req_param.pdev_id = pdev_id;
- req_param.vdev_id = vdev_id;
- req_param.stats_id = stats_id;
-
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
- if (ret)
- ath11k_warn(ab, "failed to request fw stats: %d\n", ret);
-
- ath11k_dbg(ab, ATH11K_DBG_WMI,
- "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n",
- pdev_id, vdev_id, stats_id);
-
-err_unlock:
- mutex_unlock(&ar->conf_mutex);
-
- return ret;
-}
-
static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
{
struct ath11k *ar = inode->i_private;
@@ -268,7 +146,7 @@ static int ath11k_open_pdev_stats(struct inode *inode, struct file *file)
req_param.vdev_id = 0;
req_param.stats_id = WMI_REQUEST_PDEV_STAT;
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
goto err_free;
@@ -339,7 +217,7 @@ static int ath11k_open_vdev_stats(struct inode *inode, struct file *file)
req_param.vdev_id = 0;
req_param.stats_id = WMI_REQUEST_VDEV_STAT;
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
goto err_free;
@@ -415,7 +293,7 @@ static int ath11k_open_bcn_stats(struct inode *inode, struct file *file)
continue;
req_param.vdev_id = arvif->vdev_id;
- ret = ath11k_debugfs_fw_stats_request(ar, &req_param);
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
if (ret) {
ath11k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
goto err_free;
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index a39e458637b0..ed7fec177588 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_DEBUGFS_H_
@@ -273,8 +273,6 @@ void ath11k_debugfs_unregister(struct ath11k *ar);
void ath11k_debugfs_fw_stats_process(struct ath11k *ar, struct ath11k_fw_stats *stats);
void ath11k_debugfs_fw_stats_init(struct ath11k *ar);
-int ath11k_debugfs_get_fw_stats(struct ath11k *ar, u32 pdev_id,
- u32 vdev_id, u32 stats_id);
static inline bool ath11k_debugfs_is_pktlog_lite_mode_enabled(struct ath11k *ar)
{
@@ -381,12 +379,6 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
return 0;
}
-static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
- u32 pdev_id, u32 vdev_id, u32 stats_id)
-{
- return 0;
-}
-
static inline void
ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
enum wmi_direct_buffer_module id,
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 08d7b136851f..13301ca317a5 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -8997,6 +8997,81 @@ static void ath11k_mac_put_chain_rssi(struct station_info *sinfo,
}
}
+static void ath11k_mac_fw_stats_reset(struct ath11k *ar)
+{
+ spin_lock_bh(&ar->data_lock);
+ ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
+ ath11k_fw_stats_vdevs_free(&ar->fw_stats.vdevs);
+ ar->fw_stats.num_vdev_recvd = 0;
+ ar->fw_stats.num_bcn_recvd = 0;
+ spin_unlock_bh(&ar->data_lock);
+}
+
+int ath11k_mac_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param)
+{
+ struct ath11k_base *ab = ar->ab;
+ unsigned long time_left;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_mac_fw_stats_reset(ar);
+
+ reinit_completion(&ar->fw_stats_complete);
+ reinit_completion(&ar->fw_stats_done);
+
+ ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
+
+ if (ret) {
+ ath11k_warn(ab, "could not request fw stats (%d)\n",
+ ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ /* FW stats can get split when exceeding the stats data buffer limit.
+ * In that case, since there is no end marking for the back-to-back
+ * received 'update stats' event, we keep a 3 second timeout in case
+ * fw_stats_done is not marked yet
+ */
+ time_left = wait_for_completion_timeout(&ar->fw_stats_done, 3 * HZ);
+ if (!time_left)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ath11k_mac_get_fw_stats(struct ath11k *ar, u32 pdev_id,
+ u32 vdev_id, u32 stats_id)
+{
+ struct ath11k_base *ab = ar->ab;
+ struct stats_request_params req_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->state != ATH11K_STATE_ON)
+ return -ENETDOWN;
+
+ req_param.pdev_id = pdev_id;
+ req_param.vdev_id = vdev_id;
+ req_param.stats_id = stats_id;
+
+ ret = ath11k_mac_fw_stats_request(ar, &req_param);
+ if (ret)
+ ath11k_warn(ab, "failed to request fw stats: %d\n", ret);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "debug get fw stat pdev id %d vdev id %d stats id 0x%x\n",
+ pdev_id, vdev_id, stats_id);
+
+ return ret;
+}
+
static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -9031,11 +9106,12 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
ath11k_mac_put_chain_rssi(sinfo, arsta, "ppdu", false);
+ mutex_lock(&ar->conf_mutex);
if (!(sinfo->filled & BIT_ULL(NL80211_STA_INFO_CHAIN_SIGNAL)) &&
arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
ar->ab->hw_params.supports_rssi_stats &&
- !ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
- WMI_REQUEST_RSSI_PER_CHAIN_STAT)) {
+ !ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_RSSI_PER_CHAIN_STAT)) {
ath11k_mac_put_chain_rssi(sinfo, arsta, "fw stats", true);
}
@@ -9043,9 +9119,10 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
if (!signal &&
arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA &&
ar->ab->hw_params.supports_rssi_stats &&
- !(ath11k_debugfs_get_fw_stats(ar, ar->pdev->pdev_id, 0,
- WMI_REQUEST_VDEV_STAT)))
+ !(ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_VDEV_STAT)))
signal = arsta->rssi_beacon;
+ mutex_unlock(&ar->conf_mutex);
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"sta statistics db2dbm %u rssi comb %d rssi beacon %d\n",
@@ -9380,38 +9457,6 @@ exit:
return ret;
}
-static int ath11k_fw_stats_request(struct ath11k *ar,
- struct stats_request_params *req_param)
-{
- struct ath11k_base *ab = ar->ab;
- unsigned long time_left;
- int ret;
-
- lockdep_assert_held(&ar->conf_mutex);
-
- spin_lock_bh(&ar->data_lock);
- ar->fw_stats_done = false;
- ath11k_fw_stats_pdevs_free(&ar->fw_stats.pdevs);
- spin_unlock_bh(&ar->data_lock);
-
- reinit_completion(&ar->fw_stats_complete);
-
- ret = ath11k_wmi_send_stats_request_cmd(ar, req_param);
- if (ret) {
- ath11k_warn(ab, "could not request fw stats (%d)\n",
- ret);
- return ret;
- }
-
- time_left = wait_for_completion_timeout(&ar->fw_stats_complete,
- 1 * HZ);
-
- if (!time_left)
- return -ETIMEDOUT;
-
- return 0;
-}
-
static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
unsigned int link_id,
@@ -9419,7 +9464,6 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
- struct stats_request_params req_param = {0};
struct ath11k_fw_stats_pdev *pdev;
int ret;
@@ -9431,9 +9475,6 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
*/
mutex_lock(&ar->conf_mutex);
- if (ar->state != ATH11K_STATE_ON)
- goto err_fallback;
-
/* Firmware doesn't provide Tx power during CAC hence no need to fetch
* the stats.
*/
@@ -9442,10 +9483,8 @@ static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
return -EAGAIN;
}
- req_param.pdev_id = ar->pdev->pdev_id;
- req_param.stats_id = WMI_REQUEST_PDEV_STAT;
-
- ret = ath11k_fw_stats_request(ar, &req_param);
+ ret = ath11k_mac_get_fw_stats(ar, ar->pdev->pdev_id, 0,
+ WMI_REQUEST_PDEV_STAT);
if (ret) {
ath11k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
goto err_fallback;
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index f5800fbecff8..5e61eea1bb03 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_MAC_H
@@ -179,4 +179,6 @@ int ath11k_mac_vif_set_keepalive(struct ath11k_vif *arvif,
void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *ctx);
+int ath11k_mac_fw_stats_request(struct ath11k *ar,
+ struct stats_request_params *req_param);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index d7f852bebf4a..56af2e9634f4 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -8158,6 +8158,11 @@ static void ath11k_peer_assoc_conf_event(struct ath11k_base *ab, struct sk_buff
static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *skb)
{
struct ath11k_fw_stats stats = {};
+ size_t total_vdevs_started = 0;
+ struct ath11k_pdev *pdev;
+ bool is_end = true;
+ int i;
+
struct ath11k *ar;
int ret;
@@ -8184,25 +8189,57 @@ static void ath11k_update_stats_event(struct ath11k_base *ab, struct sk_buff *sk
spin_lock_bh(&ar->data_lock);
- /* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
+ /* WMI_REQUEST_PDEV_STAT, WMI_REQUEST_VDEV_STAT and
+ * WMI_REQUEST_RSSI_PER_CHAIN_STAT can be requested via mac ops or via
* debugfs fw stats. Therefore, processing it separately.
*/
if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
- ar->fw_stats_done = true;
+ complete(&ar->fw_stats_done);
+ goto complete;
+ }
+
+ if (stats.stats_id == WMI_REQUEST_RSSI_PER_CHAIN_STAT) {
+ complete(&ar->fw_stats_done);
goto complete;
}
- /* WMI_REQUEST_VDEV_STAT, WMI_REQUEST_BCN_STAT and WMI_REQUEST_RSSI_PER_CHAIN_STAT
- * are currently requested only via debugfs fw stats. Hence, processing these
- * in debugfs context
+ if (stats.stats_id == WMI_REQUEST_VDEV_STAT) {
+ if (list_empty(&stats.vdevs)) {
+ ath11k_warn(ab, "empty vdev stats");
+ goto complete;
+ }
+ /* FW sends all the active VDEV stats irrespective of PDEV,
+ * hence limit until the count of all VDEVs started
+ */
+ for (i = 0; i < ab->num_radios; i++) {
+ pdev = rcu_dereference(ab->pdevs_active[i]);
+ if (pdev && pdev->ar)
+ total_vdevs_started += ar->num_started_vdevs;
+ }
+
+ if (total_vdevs_started)
+ is_end = ((++ar->fw_stats.num_vdev_recvd) ==
+ total_vdevs_started);
+
+ list_splice_tail_init(&stats.vdevs,
+ &ar->fw_stats.vdevs);
+
+ if (is_end)
+ complete(&ar->fw_stats_done);
+
+ goto complete;
+ }
+
+ /* WMI_REQUEST_BCN_STAT is currently requested only via debugfs fw stats.
+ * Hence, processing it in debugfs context
*/
ath11k_debugfs_fw_stats_process(ar, &stats);
complete:
complete(&ar->fw_stats_complete);
- rcu_read_unlock();
spin_unlock_bh(&ar->data_lock);
+ rcu_read_unlock();
/* Since the stats's pdev, vdev and beacon list are spliced and reinitialised
* at this point, no need to free the individual list.
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 31d851d8e688..ebc0560d40e3 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -2129,7 +2129,8 @@ int ath12k_core_init(struct ath12k_base *ab)
if (!ag) {
mutex_unlock(&ath12k_hw_group_mutex);
ath12k_warn(ab, "unable to get hw group\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_unregister_notifier;
}
mutex_unlock(&ath12k_hw_group_mutex);
@@ -2144,7 +2145,7 @@ int ath12k_core_init(struct ath12k_base *ab)
if (ret) {
mutex_unlock(&ag->mutex);
ath12k_warn(ab, "unable to create hw group\n");
- goto err;
+ goto err_destroy_hw_group;
}
}
@@ -2152,9 +2153,12 @@ int ath12k_core_init(struct ath12k_base *ab)
return 0;
-err:
+err_destroy_hw_group:
ath12k_core_hw_group_destroy(ab->ag);
ath12k_core_hw_group_unassign(ab);
+err_unregister_notifier:
+ ath12k_core_panic_notifier_unregister(ab);
+
return ret;
}
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index 0ee9c6b26dab..c1750b5dc03c 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -585,7 +585,8 @@ enum hal_reo_cmd_type {
* or cache was blocked
* @HAL_REO_CMD_FAILED: Command execution failed, could be due to
* invalid queue desc
- * @HAL_REO_CMD_RESOURCE_BLOCKED:
+ * @HAL_REO_CMD_RESOURCE_BLOCKED: Command could not be executed because
+ * one or more descriptors were blocked
* @HAL_REO_CMD_DRAIN:
*/
enum hal_reo_cmd_status {
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index 7e2cf0fb2085..8254dc10b53b 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -951,6 +951,8 @@ static const struct ath12k_hw_regs qcn9274_v1_regs = {
.hal_umac_ce0_dest_reg_base = 0x01b81000,
.hal_umac_ce1_src_reg_base = 0x01b82000,
.hal_umac_ce1_dest_reg_base = 0x01b83000,
+
+ .gcc_gcc_pcie_hot_rst = 0x1e38338,
};
static const struct ath12k_hw_regs qcn9274_v2_regs = {
@@ -1042,6 +1044,8 @@ static const struct ath12k_hw_regs qcn9274_v2_regs = {
.hal_umac_ce0_dest_reg_base = 0x01b81000,
.hal_umac_ce1_src_reg_base = 0x01b82000,
.hal_umac_ce1_dest_reg_base = 0x01b83000,
+
+ .gcc_gcc_pcie_hot_rst = 0x1e38338,
};
static const struct ath12k_hw_regs ipq5332_regs = {
@@ -1215,6 +1219,8 @@ static const struct ath12k_hw_regs wcn7850_regs = {
.hal_umac_ce0_dest_reg_base = 0x01b81000,
.hal_umac_ce1_src_reg_base = 0x01b82000,
.hal_umac_ce1_dest_reg_base = 0x01b83000,
+
+ .gcc_gcc_pcie_hot_rst = 0x1e40304,
};
static const struct ath12k_hw_hal_params ath12k_hw_hal_params_qcn9274 = {
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index 0fbc17649df4..0a75bc5abfa2 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -375,6 +375,8 @@ struct ath12k_hw_regs {
u32 hal_reo_cmd_ring_base;
u32 hal_reo_status_ring_base;
+
+ u32 gcc_gcc_pcie_hot_rst;
};
static inline const char *ath12k_bd_ie_type_str(enum ath12k_bd_ie_type type)
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index 489d546390fc..1f3cfd9b89fd 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -292,10 +292,10 @@ static void ath12k_pci_enable_ltssm(struct ath12k_base *ab)
ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val);
- val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
+ val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST(ab));
val |= GCC_GCC_PCIE_HOT_RST_VAL;
- ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
- val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
+ ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST(ab), val);
+ val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST(ab));
ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index 0b4c459d6d8e..d1ec8aad7f6c 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -28,7 +28,9 @@
#define PCIE_PCIE_PARF_LTSSM 0x1e081b0
#define PARM_LTSSM_VALUE 0x111
-#define GCC_GCC_PCIE_HOT_RST 0x1e38338
+#define GCC_GCC_PCIE_HOT_RST(ab) \
+ ((ab)->hw_params->regs->gcc_gcc_pcie_hot_rst)
+
#define GCC_GCC_PCIE_HOT_RST_VAL 0x10
#define PCIE_PCIE_INT_ALL_CLEAR 0x1e08228
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 67172385a5d6..89d4394cedcf 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -179,9 +179,11 @@ void wil_mask_irq(struct wil6210_priv *wil)
wil_dbg_irq(wil, "mask_irq\n");
wil6210_mask_irq_tx(wil);
- wil6210_mask_irq_tx_edma(wil);
+ if (wil->use_enhanced_dma_hw)
+ wil6210_mask_irq_tx_edma(wil);
wil6210_mask_irq_rx(wil);
- wil6210_mask_irq_rx_edma(wil);
+ if (wil->use_enhanced_dma_hw)
+ wil6210_mask_irq_rx_edma(wil);
wil6210_mask_irq_misc(wil, true);
wil6210_mask_irq_pseudo(wil);
}
@@ -190,10 +192,12 @@ void wil_unmask_irq(struct wil6210_priv *wil)
{
wil_dbg_irq(wil, "unmask_irq\n");
- wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
- WIL_ICR_ICC_VALUE);
- wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
- WIL_ICR_ICC_VALUE);
+ if (wil->use_enhanced_dma_hw) {
+ wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
+ WIL_ICR_ICC_VALUE);
+ }
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_MISC_VALUE);
wil_w(wil, RGF_INT_GEN_TX_ICR + offsetof(struct RGF_ICR, ICC),
@@ -845,10 +849,12 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
offsetof(struct RGF_ICR, ICR));
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_TX_ICR) +
offsetof(struct RGF_ICR, ICR));
- wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) +
- offsetof(struct RGF_ICR, ICR));
- wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) +
- offsetof(struct RGF_ICR, ICR));
+ if (wil->use_enhanced_dma_hw) {
+ wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_RX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ wil_clear32(wil->csr + HOSTADDR(RGF_INT_GEN_TX_ICR) +
+ offsetof(struct RGF_ICR, ICR));
+ }
wil_clear32(wil->csr + HOSTADDR(RGF_DMA_EP_MISC_ICR) +
offsetof(struct RGF_ICR, ICR));
wmb(); /* make sure write completed */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 656f8b06c27b..0a9e0dbb58fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1501,11 +1501,27 @@ static int _iwl_pci_resume(struct device *device, bool restore)
* Scratch value was altered, this means the device was powered off, we
* need to reset it completely.
* Note: MAC (bits 0:7) will be cleared upon suspend even with wowlan,
- * so assume that any bits there mean that the device is usable.
+ * but not bits [15:8]. So if we have bits set in the lower word, assume
+ * the device is alive.
+ * For older devices, just try silently to grab the NIC.
*/
- if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ &&
- !iwl_read32(trans, CSR_FUNC_SCRATCH))
- device_was_powered_off = true;
+ if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ if (!(iwl_read32(trans, CSR_FUNC_SCRATCH) &
+ CSR_FUNC_SCRATCH_POWER_OFF_MASK))
+ device_was_powered_off = true;
+ } else {
+ /*
+ * bh are re-enabled by iwl_trans_pcie_release_nic_access,
+ * so re-enable them if _iwl_trans_pcie_grab_nic_access fails.
+ */
+ local_bh_disable();
+ if (_iwl_trans_pcie_grab_nic_access(trans, true)) {
+ iwl_trans_pcie_release_nic_access(trans);
+ } else {
+ device_was_powered_off = true;
+ local_bh_enable();
+ }
+ }
if (restore || device_was_powered_off) {
trans->state = IWL_TRANS_NO_FW;
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 738bafc3749b..66f0f5377ac1 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -403,14 +403,12 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
if (sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
bss_desc->bcn_ht_oper->ht_param &
- IEEE80211_HT_PARAM_CHAN_WIDTH_ANY) {
- chan_list->chan_scan_param[0].radio_type |=
- CHAN_BW_40MHZ << 2;
+ IEEE80211_HT_PARAM_CHAN_WIDTH_ANY)
SET_SECONDARYCHAN(chan_list->chan_scan_param[0].
radio_type,
(bss_desc->bcn_ht_oper->ht_param &
IEEE80211_HT_PARAM_CHA_SEC_OFFSET));
- }
+
*buffer += struct_size(chan_list, chan_scan_param, 1);
ret_len += struct_size(chan_list, chan_scan_param, 1);
}
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 0b50da2f1175..6b3ac8ae3f34 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -429,21 +429,14 @@ static enum rq_end_io_ret nvme_uring_cmd_end_io(struct request *req,
pdu->result = le64_to_cpu(nvme_req(req)->result.u64);
/*
- * For iopoll, complete it directly. Note that using the uring_cmd
- * helper for this is safe only because we check blk_rq_is_poll().
- * As that returns false if we're NOT on a polled queue, then it's
- * safe to use the polled completion helper.
- *
- * Otherwise, move the completion to task work.
+ * IOPOLL could potentially complete this request directly, but
+ * if multiple rings are polling on the same queue, then it's possible
+ * for one ring to find completions for another ring. Punting the
+ * completion via task_work will always direct it to the right
+ * location, rather than potentially complete requests for ringA
+ * under iopoll invocations from ringB.
*/
- if (blk_rq_is_poll(req)) {
- if (pdu->bio)
- blk_rq_unmap_user(pdu->bio);
- io_uring_cmd_iopoll_done(ioucmd, pdu->result, pdu->status);
- } else {
- io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
- }
-
+ io_uring_cmd_do_in_task_lazy(ioucmd, nvme_uring_task_cb);
return RQ_END_IO_FREE;
}
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index fe2d52e434db..8a2ef74862d3 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -374,11 +374,6 @@ static struct st_pio_control *st_get_pio_control(
}
/* Low level functions.. */
-static inline int st_gpio_bank(int gpio)
-{
- return gpio/ST_GPIO_PINS_PER_BANK;
-}
-
static inline int st_gpio_pin(int gpio)
{
return gpio%ST_GPIO_PINS_PER_BANK;
diff --git a/drivers/pinctrl/pinctrl-tb10x.c b/drivers/pinctrl/pinctrl-tb10x.c
index d6bb8f58978d..4edb20e61951 100644
--- a/drivers/pinctrl/pinctrl-tb10x.c
+++ b/drivers/pinctrl/pinctrl-tb10x.c
@@ -823,7 +823,7 @@ static struct platform_driver tb10x_pinctrl_pdrv = {
.remove = tb10x_pinctrl_remove,
.driver = {
.name = "tb10x_pinctrl",
- .of_match_table = of_match_ptr(tb10x_pinctrl_dt_ids),
+ .of_match_table = tb10x_pinctrl_dt_ids,
}
};
diff --git a/drivers/pinctrl/qcom/pinctrl-apq8064.c b/drivers/pinctrl/qcom/pinctrl-apq8064.c
index 20c3b9025044..3654913f1ae5 100644
--- a/drivers/pinctrl/qcom/pinctrl-apq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-apq8064.c
@@ -629,7 +629,6 @@ static struct platform_driver apq8064_pinctrl_driver = {
.of_match_table = apq8064_pinctrl_of_match,
},
.probe = apq8064_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init apq8064_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-apq8084.c b/drivers/pinctrl/qcom/pinctrl-apq8084.c
index 3fc0a40762b6..27693cd64881 100644
--- a/drivers/pinctrl/qcom/pinctrl-apq8084.c
+++ b/drivers/pinctrl/qcom/pinctrl-apq8084.c
@@ -1207,7 +1207,6 @@ static struct platform_driver apq8084_pinctrl_driver = {
.of_match_table = apq8084_pinctrl_of_match,
},
.probe = apq8084_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init apq8084_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
index 1f7944dd829d..6ede3149b6e1 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c
@@ -710,7 +710,6 @@ static struct platform_driver ipq4019_pinctrl_driver = {
.of_match_table = ipq4019_pinctrl_of_match,
},
.probe = ipq4019_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq4019_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5018.c b/drivers/pinctrl/qcom/pinctrl-ipq5018.c
index e2951f81c3ee..10b99d5d8a11 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5018.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5018.c
@@ -754,7 +754,6 @@ static struct platform_driver ipq5018_pinctrl_driver = {
.of_match_table = ipq5018_pinctrl_of_match,
},
.probe = ipq5018_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq5018_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5332.c b/drivers/pinctrl/qcom/pinctrl-ipq5332.c
index 625f8014051f..1ac2fc09c119 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5332.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5332.c
@@ -834,7 +834,6 @@ static struct platform_driver ipq5332_pinctrl_driver = {
.of_match_table = ipq5332_pinctrl_of_match,
},
.probe = ipq5332_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq5332_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq5424.c b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
index 0d610b076da3..7ff1f8acc1a3 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq5424.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq5424.c
@@ -791,7 +791,6 @@ static struct platform_driver ipq5424_pinctrl_driver = {
.of_match_table = ipq5424_pinctrl_of_match,
},
.probe = ipq5424_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq5424_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq6018.c b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
index 0ad08647dbcd..a4ba980252e1 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq6018.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
@@ -1080,7 +1080,6 @@ static struct platform_driver ipq6018_pinctrl_driver = {
.of_match_table = ipq6018_pinctrl_of_match,
},
.probe = ipq6018_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq6018_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8064.c b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
index e2bb94e86aef..0a9e357e64c6 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq8064.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8064.c
@@ -631,7 +631,6 @@ static struct platform_driver ipq8064_pinctrl_driver = {
.of_match_table = ipq8064_pinctrl_of_match,
},
.probe = ipq8064_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq8064_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8074.c b/drivers/pinctrl/qcom/pinctrl-ipq8074.c
index 337f3a1c92c1..482f13282fc2 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq8074.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq8074.c
@@ -1041,7 +1041,6 @@ static struct platform_driver ipq8074_pinctrl_driver = {
.of_match_table = ipq8074_pinctrl_of_match,
},
.probe = ipq8074_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq8074_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq9574.c b/drivers/pinctrl/qcom/pinctrl-ipq9574.c
index e2491617b236..89c05d8eb550 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq9574.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq9574.c
@@ -799,7 +799,6 @@ static struct platform_driver ipq9574_pinctrl_driver = {
.of_match_table = ipq9574_pinctrl_of_match,
},
.probe = ipq9574_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init ipq9574_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9607.c b/drivers/pinctrl/qcom/pinctrl-mdm9607.c
index e7cd3ef1cf3e..3e18ba124fed 100644
--- a/drivers/pinctrl/qcom/pinctrl-mdm9607.c
+++ b/drivers/pinctrl/qcom/pinctrl-mdm9607.c
@@ -1059,7 +1059,6 @@ static struct platform_driver mdm9607_pinctrl_driver = {
.of_match_table = mdm9607_pinctrl_of_match,
},
.probe = mdm9607_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init mdm9607_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-mdm9615.c b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
index 0a2ae383d3d5..bea1ca3d1b7f 100644
--- a/drivers/pinctrl/qcom/pinctrl-mdm9615.c
+++ b/drivers/pinctrl/qcom/pinctrl-mdm9615.c
@@ -446,7 +446,6 @@ static struct platform_driver mdm9615_pinctrl_driver = {
.of_match_table = mdm9615_pinctrl_of_match,
},
.probe = mdm9615_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init mdm9615_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index f012ea88aa22..5c4687de1464 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -1442,7 +1442,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
girq->handler = handle_bad_irq;
girq->parents[0] = pctrl->irq;
- ret = gpiochip_add_data(&pctrl->chip, pctrl);
+ ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
if (ret) {
dev_err(pctrl->dev, "Failed register gpiochip\n");
return ret;
@@ -1463,7 +1463,6 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
dev_name(pctrl->dev), 0, 0, chip->ngpio);
if (ret) {
dev_err(pctrl->dev, "Failed to add pin range\n");
- gpiochip_remove(&pctrl->chip);
return ret;
}
}
@@ -1599,13 +1598,5 @@ int msm_pinctrl_probe(struct platform_device *pdev,
}
EXPORT_SYMBOL(msm_pinctrl_probe);
-void msm_pinctrl_remove(struct platform_device *pdev)
-{
- struct msm_pinctrl *pctrl = platform_get_drvdata(pdev);
-
- gpiochip_remove(&pctrl->chip);
-}
-EXPORT_SYMBOL(msm_pinctrl_remove);
-
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. TLMM driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 63852ed70295..d7dc0947bb16 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -171,6 +171,5 @@ extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
int msm_pinctrl_probe(struct platform_device *pdev,
const struct msm_pinctrl_soc_data *soc_data);
-void msm_pinctrl_remove(struct platform_device *pdev);
#endif
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8226.c b/drivers/pinctrl/qcom/pinctrl-msm8226.c
index 64fee70f1772..f9a957347340 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8226.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8226.c
@@ -654,7 +654,6 @@ static struct platform_driver msm8226_pinctrl_driver = {
.of_match_table = msm8226_pinctrl_of_match,
},
.probe = msm8226_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8226_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8660.c b/drivers/pinctrl/qcom/pinctrl-msm8660.c
index 999a5f867eb5..4dbc19ffd80e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8660.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8660.c
@@ -981,7 +981,6 @@ static struct platform_driver msm8660_pinctrl_driver = {
.of_match_table = msm8660_pinctrl_of_match,
},
.probe = msm8660_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8660_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8909.c b/drivers/pinctrl/qcom/pinctrl-msm8909.c
index 756856d20d6b..0aa4f77b774f 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8909.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8909.c
@@ -929,7 +929,6 @@ static struct platform_driver msm8909_pinctrl_driver = {
.of_match_table = msm8909_pinctrl_of_match,
},
.probe = msm8909_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8909_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8916.c b/drivers/pinctrl/qcom/pinctrl-msm8916.c
index cea5c54f92fe..0dfc6dd33d58 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8916.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8916.c
@@ -969,7 +969,6 @@ static struct platform_driver msm8916_pinctrl_driver = {
.of_match_table = msm8916_pinctrl_of_match,
},
.probe = msm8916_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8916_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8917.c b/drivers/pinctrl/qcom/pinctrl-msm8917.c
index 350636807b07..2e1a94ab18b2 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8917.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8917.c
@@ -1607,7 +1607,6 @@ static struct platform_driver msm8917_pinctrl_driver = {
.of_match_table = msm8917_pinctrl_of_match,
},
.probe = msm8917_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8917_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8953.c b/drivers/pinctrl/qcom/pinctrl-msm8953.c
index 998351bdfee1..956383341a7a 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8953.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8953.c
@@ -1816,7 +1816,6 @@ static struct platform_driver msm8953_pinctrl_driver = {
.of_match_table = msm8953_pinctrl_of_match,
},
.probe = msm8953_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8953_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8960.c b/drivers/pinctrl/qcom/pinctrl-msm8960.c
index ebe230b3b437..a937ea867de7 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8960.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8960.c
@@ -1246,7 +1246,6 @@ static struct platform_driver msm8960_pinctrl_driver = {
.of_match_table = msm8960_pinctrl_of_match,
},
.probe = msm8960_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8960_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c
index c30d80e4e98c..3bcb03387781 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8976.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c
@@ -1096,7 +1096,6 @@ static struct platform_driver msm8976_pinctrl_driver = {
.of_match_table = msm8976_pinctrl_of_match,
},
.probe = msm8976_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8976_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8994.c b/drivers/pinctrl/qcom/pinctrl-msm8994.c
index b1a6759ab4a5..7a3b6cbccb68 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8994.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8994.c
@@ -1343,7 +1343,6 @@ static struct platform_driver msm8994_pinctrl_driver = {
.of_match_table = msm8994_pinctrl_of_match,
},
.probe = msm8994_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8994_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8996.c b/drivers/pinctrl/qcom/pinctrl-msm8996.c
index 1b5d80eaab83..d86d83106d3b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8996.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8996.c
@@ -1920,7 +1920,6 @@ static struct platform_driver msm8996_pinctrl_driver = {
.of_match_table = msm8996_pinctrl_of_match,
},
.probe = msm8996_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8996_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8998.c b/drivers/pinctrl/qcom/pinctrl-msm8998.c
index b7cbf32b3125..1daee815888f 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8998.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8998.c
@@ -1535,7 +1535,6 @@ static struct platform_driver msm8998_pinctrl_driver = {
.of_match_table = msm8998_pinctrl_of_match,
},
.probe = msm8998_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8998_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-msm8x74.c b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
index 238c83f6ec4f..8253aa25775b 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm8x74.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm8x74.c
@@ -1083,7 +1083,6 @@ static struct platform_driver msm8x74_pinctrl_driver = {
.of_match_table = msm8x74_pinctrl_of_match,
},
.probe = msm8x74_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init msm8x74_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qcm2290.c b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
index f885af571ec9..eeeec6434f6a 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcm2290.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcm2290.c
@@ -167,6 +167,10 @@ static const struct pinctrl_pin_desc qcm2290_pins[] = {
PINCTRL_PIN(62, "GPIO_62"),
PINCTRL_PIN(63, "GPIO_63"),
PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
PINCTRL_PIN(69, "GPIO_69"),
PINCTRL_PIN(70, "GPIO_70"),
PINCTRL_PIN(71, "GPIO_71"),
@@ -181,12 +185,17 @@ static const struct pinctrl_pin_desc qcm2290_pins[] = {
PINCTRL_PIN(80, "GPIO_80"),
PINCTRL_PIN(81, "GPIO_81"),
PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
PINCTRL_PIN(86, "GPIO_86"),
PINCTRL_PIN(87, "GPIO_87"),
PINCTRL_PIN(88, "GPIO_88"),
PINCTRL_PIN(89, "GPIO_89"),
PINCTRL_PIN(90, "GPIO_90"),
PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
PINCTRL_PIN(94, "GPIO_94"),
PINCTRL_PIN(95, "GPIO_95"),
PINCTRL_PIN(96, "GPIO_96"),
@@ -1125,7 +1134,6 @@ static struct platform_driver qcm2290_pinctrl_driver = {
.of_match_table = qcm2290_pinctrl_of_match,
},
.probe = qcm2290_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qcm2290_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c
index ae7224012f8a..54e3b4435349 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs404.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c
@@ -1644,7 +1644,6 @@ static struct platform_driver qcs404_pinctrl_driver = {
.of_match_table = qcs404_pinctrl_of_match,
},
.probe = qcs404_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qcs404_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs615.c b/drivers/pinctrl/qcom/pinctrl-qcs615.c
index 17ca743c2210..2a943bc46a62 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs615.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs615.c
@@ -1087,7 +1087,6 @@ static struct platform_driver qcs615_tlmm_driver = {
.of_match_table = qcs615_tlmm_of_match,
},
.probe = qcs615_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qcs615_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs8300.c b/drivers/pinctrl/qcom/pinctrl-qcs8300.c
index 5f5f7c4ac644..d6437e26392b 100644
--- a/drivers/pinctrl/qcom/pinctrl-qcs8300.c
+++ b/drivers/pinctrl/qcom/pinctrl-qcs8300.c
@@ -1227,7 +1227,6 @@ static struct platform_driver qcs8300_pinctrl_driver = {
.of_match_table = qcs8300_pinctrl_of_match,
},
.probe = qcs8300_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qcs8300_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
index b5808fcfb13c..9ecc4d40e4dc 100644
--- a/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
+++ b/drivers/pinctrl/qcom/pinctrl-qdf2xxx.c
@@ -145,7 +145,6 @@ static struct platform_driver qdf2xxx_pinctrl_driver = {
.acpi_match_table = qdf2xxx_acpi_ids,
},
.probe = qdf2xxx_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qdf2xxx_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-qdu1000.c b/drivers/pinctrl/qcom/pinctrl-qdu1000.c
index 47bc529ef550..eacb89fa3888 100644
--- a/drivers/pinctrl/qcom/pinctrl-qdu1000.c
+++ b/drivers/pinctrl/qcom/pinctrl-qdu1000.c
@@ -1248,7 +1248,6 @@ static struct platform_driver qdu1000_tlmm_driver = {
.of_match_table = qdu1000_tlmm_of_match,
},
.probe = qdu1000_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init qdu1000_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sa8775p.c b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
index a5b38221aea8..1b62eb3e6620 100644
--- a/drivers/pinctrl/qcom/pinctrl-sa8775p.c
+++ b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
@@ -1540,7 +1540,6 @@ static struct platform_driver sa8775p_pinctrl_driver = {
.of_match_table = sa8775p_pinctrl_of_match,
},
.probe = sa8775p_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sa8775p_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sar2130p.c b/drivers/pinctrl/qcom/pinctrl-sar2130p.c
index 19a2e37826c7..3dd1b5e5cfee 100644
--- a/drivers/pinctrl/qcom/pinctrl-sar2130p.c
+++ b/drivers/pinctrl/qcom/pinctrl-sar2130p.c
@@ -1486,7 +1486,6 @@ static struct platform_driver sar2130p_tlmm_driver = {
.of_match_table = sar2130p_tlmm_of_match,
},
.probe = sar2130p_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sar2130p_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7180.c b/drivers/pinctrl/qcom/pinctrl-sc7180.c
index 6eb0c73791c0..c43fe10b71ad 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7180.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7180.c
@@ -1159,7 +1159,6 @@ static struct platform_driver sc7180_pinctrl_driver = {
.of_match_table = sc7180_pinctrl_of_match,
},
.probe = sc7180_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sc7180_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
index 0c10eeb60b55..1b070e9d41f5 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
@@ -1505,7 +1505,6 @@ static struct platform_driver sc7280_pinctrl_driver = {
.of_match_table = sc7280_pinctrl_of_match,
},
.probe = sc7280_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sc7280_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8180x.c b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
index d6a79ad41a40..26dd165d1543 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8180x.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8180x.c
@@ -1720,7 +1720,6 @@ static struct platform_driver sc8180x_pinctrl_driver = {
.acpi_match_table = sc8180x_pinctrl_acpi_match,
},
.probe = sc8180x_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sc8180x_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
index 96f4fb5a5d29..6ccd7e5648d4 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c
@@ -1926,7 +1926,6 @@ static struct platform_driver sc8280xp_pinctrl_driver = {
.of_match_table = sc8280xp_pinctrl_of_match,
},
.probe = sc8280xp_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sc8280xp_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm660.c b/drivers/pinctrl/qcom/pinctrl-sdm660.c
index 907e4ffca5e7..1a78288f1bc8 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm660.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm660.c
@@ -1442,7 +1442,6 @@ static struct platform_driver sdm660_pinctrl_driver = {
.of_match_table = sdm660_pinctrl_of_match,
},
.probe = sdm660_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdm660_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm670.c b/drivers/pinctrl/qcom/pinctrl-sdm670.c
index c76183ba95e1..0fe1fa94cd6d 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm670.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm670.c
@@ -1337,7 +1337,6 @@ static struct platform_driver sdm670_pinctrl_driver = {
.of_match_table = sdm670_pinctrl_of_match,
},
.probe = sdm670_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdm670_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdm845.c b/drivers/pinctrl/qcom/pinctrl-sdm845.c
index cc05c415ed15..0446e291aa48 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdm845.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdm845.c
@@ -1351,7 +1351,6 @@ static struct platform_driver sdm845_pinctrl_driver = {
.acpi_match_table = ACPI_PTR(sdm845_pinctrl_acpi_match),
},
.probe = sdm845_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdm845_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx55.c b/drivers/pinctrl/qcom/pinctrl-sdx55.c
index 8826db9d21d0..2c17bf889146 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx55.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx55.c
@@ -990,7 +990,6 @@ static struct platform_driver sdx55_pinctrl_driver = {
.of_match_table = sdx55_pinctrl_of_match,
},
.probe = sdx55_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdx55_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx65.c b/drivers/pinctrl/qcom/pinctrl-sdx65.c
index f6f319c997fc..85b5c0206dbd 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx65.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx65.c
@@ -939,7 +939,6 @@ static struct platform_driver sdx65_pinctrl_driver = {
.of_match_table = sdx65_pinctrl_of_match,
},
.probe = sdx65_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdx65_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx75.c b/drivers/pinctrl/qcom/pinctrl-sdx75.c
index 3cfe8c7f04df..ab13a3a57a83 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdx75.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdx75.c
@@ -1124,7 +1124,6 @@ static struct platform_driver sdx75_pinctrl_driver = {
.of_match_table = sdx75_pinctrl_of_match,
},
.probe = sdx75_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sdx75_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm4450.c b/drivers/pinctrl/qcom/pinctrl-sm4450.c
index 622f20e6f6f8..1ecdf1ab4f27 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm4450.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm4450.c
@@ -994,7 +994,6 @@ static struct platform_driver sm4450_tlmm_driver = {
.of_match_table = sm4450_tlmm_of_match,
},
.probe = sm4450_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
MODULE_DEVICE_TABLE(of, sm4450_tlmm_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6115.c b/drivers/pinctrl/qcom/pinctrl-sm6115.c
index 4e91c75ad952..c273efa43996 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6115.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6115.c
@@ -907,7 +907,6 @@ static struct platform_driver sm6115_tlmm_driver = {
.of_match_table = sm6115_tlmm_of_match,
},
.probe = sm6115_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm6115_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6125.c b/drivers/pinctrl/qcom/pinctrl-sm6125.c
index c188842047aa..5092f20e0c1b 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6125.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6125.c
@@ -1266,7 +1266,6 @@ static struct platform_driver sm6125_tlmm_driver = {
.of_match_table = sm6125_tlmm_of_match,
},
.probe = sm6125_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm6125_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6350.c b/drivers/pinctrl/qcom/pinctrl-sm6350.c
index f3828c07b134..ba4686c86c54 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6350.c
@@ -1373,7 +1373,6 @@ static struct platform_driver sm6350_tlmm_driver = {
.of_match_table = sm6350_tlmm_of_match,
},
.probe = sm6350_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm6350_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm6375.c b/drivers/pinctrl/qcom/pinctrl-sm6375.c
index c82c8516932e..49031571e65e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm6375.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm6375.c
@@ -1516,7 +1516,6 @@ static struct platform_driver sm6375_tlmm_driver = {
.of_match_table = sm6375_tlmm_of_match,
},
.probe = sm6375_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm6375_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm7150.c b/drivers/pinctrl/qcom/pinctrl-sm7150.c
index 3c7fd8af6635..6e89966cd70e 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm7150.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm7150.c
@@ -1255,7 +1255,6 @@ static struct platform_driver sm7150_tlmm_driver = {
.of_match_table = sm7150_tlmm_of_match,
},
.probe = sm7150_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm7150_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8150.c b/drivers/pinctrl/qcom/pinctrl-sm8150.c
index 01aea9c70b7a..794ed99463f7 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8150.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8150.c
@@ -1542,7 +1542,6 @@ static struct platform_driver sm8150_pinctrl_driver = {
.of_match_table = sm8150_pinctrl_of_match,
},
.probe = sm8150_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8150_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c
index e9961a49ff98..fb6f005d64f5 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8250.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c
@@ -1351,7 +1351,6 @@ static struct platform_driver sm8250_pinctrl_driver = {
.of_match_table = sm8250_pinctrl_of_match,
},
.probe = sm8250_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8250_pinctrl_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350.c b/drivers/pinctrl/qcom/pinctrl-sm8350.c
index 9c69458bd910..c8a3f39ce6f1 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8350.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8350.c
@@ -1642,7 +1642,6 @@ static struct platform_driver sm8350_tlmm_driver = {
.of_match_table = sm8350_tlmm_of_match,
},
.probe = sm8350_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8350_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450.c b/drivers/pinctrl/qcom/pinctrl-sm8450.c
index d11bb1ee9e3d..f2e52d5a0f93 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8450.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450.c
@@ -1677,7 +1677,6 @@ static struct platform_driver sm8450_tlmm_driver = {
.of_match_table = sm8450_tlmm_of_match,
},
.probe = sm8450_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8450_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8550.c b/drivers/pinctrl/qcom/pinctrl-sm8550.c
index 3c847d9cb5d9..1b4496cb39eb 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8550.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8550.c
@@ -1762,7 +1762,6 @@ static struct platform_driver sm8550_tlmm_driver = {
.of_match_table = sm8550_tlmm_of_match,
},
.probe = sm8550_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8550_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8650.c b/drivers/pinctrl/qcom/pinctrl-sm8650.c
index 104708252d12..449a0077f4b1 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8650.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8650.c
@@ -1742,7 +1742,6 @@ static struct platform_driver sm8650_tlmm_driver = {
.of_match_table = sm8650_tlmm_of_match,
},
.probe = sm8650_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8650_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8750.c b/drivers/pinctrl/qcom/pinctrl-sm8750.c
index b94fb4ee0ec3..8516693d1db5 100644
--- a/drivers/pinctrl/qcom/pinctrl-sm8750.c
+++ b/drivers/pinctrl/qcom/pinctrl-sm8750.c
@@ -1711,7 +1711,6 @@ static struct platform_driver sm8750_tlmm_driver = {
.of_match_table = sm8750_tlmm_of_match,
},
.probe = sm8750_tlmm_probe,
- .remove = msm_pinctrl_remove,
};
static int __init sm8750_tlmm_init(void)
diff --git a/drivers/pinctrl/qcom/pinctrl-x1e80100.c b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
index 419cb8facb2f..d4b215f34c39 100644
--- a/drivers/pinctrl/qcom/pinctrl-x1e80100.c
+++ b/drivers/pinctrl/qcom/pinctrl-x1e80100.c
@@ -1861,7 +1861,6 @@ static struct platform_driver x1e80100_pinctrl_driver = {
.of_match_table = x1e80100_pinctrl_of_match,
},
.probe = x1e80100_pinctrl_probe,
- .remove = msm_pinctrl_remove,
};
static int __init x1e80100_pinctrl_init(void)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c b/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
index 1833078f6877..4e34b0cd3b73 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi-dt.c
@@ -143,7 +143,7 @@ static struct sunxi_desc_pin *init_pins_table(struct device *dev,
*/
static int prepare_function_table(struct device *dev, struct device_node *pnode,
struct sunxi_desc_pin *pins, int npins,
- const u8 *irq_bank_muxes)
+ unsigned pin_base, const u8 *irq_bank_muxes)
{
struct device_node *node;
struct property *prop;
@@ -166,7 +166,7 @@ static int prepare_function_table(struct device *dev, struct device_node *pnode,
*/
for (i = 0; i < npins; i++) {
struct sunxi_desc_pin *pin = &pins[i];
- int bank = pin->pin.number / PINS_PER_BANK;
+ int bank = (pin->pin.number - pin_base) / PINS_PER_BANK;
if (irq_bank_muxes[bank]) {
pin->variant++;
@@ -211,7 +211,7 @@ static int prepare_function_table(struct device *dev, struct device_node *pnode,
last_bank = 0;
for (i = 0; i < npins; i++) {
struct sunxi_desc_pin *pin = &pins[i];
- int bank = pin->pin.number / PINS_PER_BANK;
+ int bank = (pin->pin.number - pin_base) / PINS_PER_BANK;
int lastfunc = pin->variant + 1;
int irq_mux = irq_bank_muxes[bank];
@@ -353,7 +353,7 @@ int sunxi_pinctrl_dt_table_init(struct platform_device *pdev,
return PTR_ERR(pins);
ret = prepare_function_table(&pdev->dev, pnode, pins, desc->npins,
- irq_bank_muxes);
+ desc->pin_base, irq_bank_muxes);
if (ret)
return ret;
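
The change above matters for controllers whose pin numbering does not start at zero: the IRQ-mux bank index has to be computed relative to the controller's first pin (desc->pin_base), otherwise every pin lands in the wrong bank. A standalone sketch of the arithmetic, with made-up numbers for illustration:

#include <stdio.h>

#define PINS_PER_BANK 32

/* Bank index of a pin, relative to the controller that owns it. */
static int pin_bank(unsigned int pin_number, unsigned int pin_base)
{
	return (pin_number - pin_base) / PINS_PER_BANK;
}

int main(void)
{
	/* Hypothetical R_PIO-style controller whose pins start at 352. */
	unsigned int pin_base = 352, pin = 354;

	printf("buggy bank:   %u\n", pin / PINS_PER_BANK);	/* 11: out of range */
	printf("correct bank: %d\n", pin_bank(pin, pin_base));	/* 0 */
	return 0;
}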
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 18934e28469e..528d86a33f37 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -98,17 +98,7 @@ static inline int queue_cnt(const struct timestamp_event_queue *q)
/* Check if ptp virtual clock is in use */
static inline bool ptp_vclock_in_use(struct ptp_clock *ptp)
{
- bool in_use = false;
-
- if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
- return true;
-
- if (!ptp->is_virtual_clock && ptp->n_vclocks)
- in_use = true;
-
- mutex_unlock(&ptp->n_vclocks_mux);
-
- return in_use;
+ return !ptp->is_virtual_clock;
}
/* Check if ptp clock shall be free running */
diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
index 97287e838ce1..66464674223f 100644
--- a/drivers/rapidio/rio_cm.c
+++ b/drivers/rapidio/rio_cm.c
@@ -783,6 +783,9 @@ static int riocm_ch_send(u16 ch_id, void *buf, int len)
if (buf == NULL || ch_id == 0 || len == 0 || len > RIO_MAX_MSG_SIZE)
return -EINVAL;
+ if (len < sizeof(struct rio_ch_chan_hdr))
+ return -EINVAL; /* insufficient data from user */
+
ch = riocm_get_channel(ch_id);
if (!ch) {
riocm_error("%s(%d) ch_%d not found", current->comm,
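
The added bound in riocm_ch_send() rejects writes shorter than the channel header before the buffer is interpreted as one; without it a short user buffer would be read past its end. A standalone sketch of the same validation order, using a stand-in header type rather than the real struct rio_ch_chan_hdr layout:

#include <stdint.h>
#include <stddef.h>
#include <errno.h>

struct ch_hdr {			/* stand-in for struct rio_ch_chan_hdr */
	uint16_t dst_ch;
	uint16_t src_ch;
	uint32_t msg_len;
};

#define MAX_MSG_SIZE 4096

static int validate_send(const void *buf, size_t len)
{
	if (!buf || len == 0 || len > MAX_MSG_SIZE)
		return -EINVAL;
	if (len < sizeof(struct ch_hdr))
		return -EINVAL;	/* insufficient data for the header */
	return 0;		/* only now is it safe to treat buf as a header */
}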
diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c
index b4fe76e33ff2..fcdd2d0317a5 100644
--- a/drivers/regulator/max20086-regulator.c
+++ b/drivers/regulator/max20086-regulator.c
@@ -5,6 +5,7 @@
// Copyright (C) 2022 Laurent Pinchart <laurent.pinchart@idesonboard.com>
// Copyright (C) 2018 Avnet, Inc.
+#include <linux/cleanup.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
@@ -133,11 +134,11 @@ static int max20086_regulators_register(struct max20086 *chip)
static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
{
struct of_regulator_match *matches;
- struct device_node *node;
unsigned int i;
int ret;
- node = of_get_child_by_name(chip->dev->of_node, "regulators");
+ struct device_node *node __free(device_node) =
+ of_get_child_by_name(chip->dev->of_node, "regulators");
if (!node) {
dev_err(chip->dev, "regulators node not found\n");
return -ENODEV;
@@ -153,7 +154,6 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
ret = of_regulator_match(chip->dev, node, matches,
chip->info->num_outputs);
- of_node_put(node);
if (ret < 0) {
dev_err(chip->dev, "Failed to match regulators\n");
return -EINVAL;
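
The explicit of_node_put() disappears because the node reference is now scope-managed: the __free(device_node) annotation from linux/cleanup.h arranges for of_node_put() to run automatically when the pointer leaves scope, including on the early error returns added here. The same idea in plain C, using the compiler attribute those kernel macros are built on (names below are illustrative, not kernel APIs):

#include <stdlib.h>
#include <string.h>

static void free_buf(char **p)	/* cleanup callback: runs when *p leaves scope */
{
	free(*p);
}

static int parse(void)
{
	char *buf __attribute__((cleanup(free_buf))) = malloc(64);

	if (!buf)
		return -1;	/* cleanup still runs; free(NULL) is a no-op */
	strcpy(buf, "parsed");	/* pretend to do some work */
	return 0;		/* buf is released here without an explicit free() */
}

int main(void)
{
	return parse();
}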
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 41e36af35488..90a84ae98b97 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -449,6 +449,8 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
return -EINVAL;
+ flush_work(&port->rport_work);
+
retval = zfcp_unit_add(port, fcp_lun);
if (retval)
return retval;
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
index 8ef174cd4d37..3e4124177b2a 100644
--- a/drivers/scsi/mvsas/mv_defs.h
+++ b/drivers/scsi/mvsas/mv_defs.h
@@ -215,7 +215,7 @@ enum hw_register_bits {
/* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
PHYEV_DEC_ERR = (1U << 24), /* Phy Decoding Error */
- PHYEV_DCDR_ERR = (1U << 23), /* STP Deocder Error */
+ PHYEV_DCDR_ERR = (1U << 23), /* STP Decoder Error */
PHYEV_CRC_ERR = (1U << 22), /* STP CRC Error */
PHYEV_UNASSOC_FIS = (1U << 19), /* unassociated FIS rx'd */
PHYEV_AN = (1U << 18), /* SATA async notification */
@@ -347,7 +347,7 @@ enum sas_cmd_port_registers {
CMD_SATA_PORT_MEM_CTL0 = 0x158, /* SATA Port Memory Control 0 */
CMD_SATA_PORT_MEM_CTL1 = 0x15c, /* SATA Port Memory Control 1 */
CMD_XOR_MEM_BIST_CTL = 0x160, /* XOR Memory BIST Control */
- CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memroy BIST Status */
+ CMD_XOR_MEM_BIST_STAT = 0x164, /* XOR Memory BIST Status */
CMD_DMA_MEM_BIST_CTL = 0x168, /* DMA Memory BIST Control */
CMD_DMA_MEM_BIST_STAT = 0x16c, /* DMA Memory BIST Status */
CMD_PORT_MEM_BIST_CTL = 0x170, /* Port Memory BIST Control */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 376b8897ab90..746ff6a1f309 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -665,7 +665,8 @@ enum scsi_disposition scsi_check_sense(struct scsi_cmnd *scmd)
* if the device is in the process of becoming ready, we
* should retry.
*/
- if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
+ if ((sshdr.asc == 0x04) &&
+ (sshdr.ascq == 0x01 || sshdr.ascq == 0x0a))
return NEEDS_RETRY;
/*
* if the device is not started, we need to wake
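
For context on the hunk above: ASC 0x04 covers the "not ready" causes, where ASCQ 0x01 is "in process of becoming ready" and ASCQ 0x0A is "asymmetric access state transition"; both are transient, so retrying is the right disposition for either. A small sketch of that decision in isolation (values per SPC, simplified):

#include <stdbool.h>

/* Retry "not ready" sense data only for transient causes. */
static bool not_ready_should_retry(unsigned char asc, unsigned char ascq)
{
	if (asc != 0x04)
		return false;
	return ascq == 0x01 ||	/* becoming ready */
	       ascq == 0x0a;	/* ALUA access state transition in progress */
}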
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0b8c91bf793f..c75a806496d6 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -3499,7 +3499,7 @@ static int iscsi_new_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.new_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_new_fnode;
}
index = transport->new_flashnode(shost, data, len);
@@ -3509,7 +3509,6 @@ static int iscsi_new_flashnode(struct iscsi_transport *transport,
else
err = -EIO;
-put_host:
scsi_host_put(shost);
exit_new_fnode:
@@ -3534,7 +3533,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.del_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_del_fnode;
}
idx = ev->u.del_flashnode.flashnode_idx;
@@ -3576,7 +3575,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.login_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_login_fnode;
}
idx = ev->u.login_flashnode.flashnode_idx;
@@ -3628,7 +3627,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.logout_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_logout_fnode;
}
idx = ev->u.logout_flashnode.flashnode_idx;
@@ -3678,7 +3677,7 @@ static int iscsi_logout_flashnode_sid(struct iscsi_transport *transport,
pr_err("%s could not find host no %u\n",
__func__, ev->u.logout_flashnode.host_no);
err = -ENODEV;
- goto put_host;
+ goto exit_logout_sid;
}
session = iscsi_session_lookup(ev->u.logout_flashnode_sid.sid);
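
Each of the paths fixed above jumped to put_host even though scsi_host_lookup() had failed, dropping a reference that was never taken; the fix routes the failure to the label that skips the put. A generic, standalone sketch of keeping get/put balanced across error labels (all names here are illustrative):

#include <errno.h>
#include <stdlib.h>

struct host { int busy; };

/* Stand-ins for scsi_host_lookup()/scsi_host_put(): lookup takes a reference. */
static struct host *host_lookup(int no) { return no == 1 ? malloc(sizeof(struct host)) : NULL; }
static void host_put(struct host *h) { free(h); }
static int do_operation(struct host *h) { (void)h; return 0; }

static int run(int host_no)
{
	struct host *h;
	int err;

	h = host_lookup(host_no);
	if (!h) {
		err = -ENODEV;
		goto out;	/* lookup failed: no reference to drop */
	}

	err = do_operation(h);
	host_put(h);		/* drop exactly the reference lookup() took */
out:
	return err;
}

int main(void)
{
	return run(1);
}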
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 2e6b2412d2c9..d9e59204a9c3 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -362,7 +362,7 @@ MODULE_PARM_DESC(ring_avail_percent_lowater,
/*
* Timeout in seconds for all devices managed by this driver.
*/
-static int storvsc_timeout = 180;
+static const int storvsc_timeout = 180;
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
static struct scsi_transport_template *fc_transport_template;
@@ -768,7 +768,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
return;
}
- t = wait_for_completion_timeout(&request->wait_event, 10*HZ);
+ t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
if (t == 0) {
dev_err(dev, "Failed to create sub-channel: timed out\n");
return;
@@ -833,7 +833,7 @@ static int storvsc_execute_vstor_op(struct hv_device *device,
if (ret != 0)
return ret;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
if (t == 0)
return -ETIMEDOUT;
@@ -1350,6 +1350,8 @@ static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size,
return ret;
ret = storvsc_channel_init(device, is_fc);
+ if (ret)
+ vmbus_close(device->channel);
return ret;
}
@@ -1668,7 +1670,7 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
if (ret != 0)
return FAILED;
- t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
+ t = wait_for_completion_timeout(&request->wait_event, storvsc_timeout * HZ);
if (t == 0)
return TIMEOUT_ERROR;
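
The storvsc hunks above replace hard-coded 5 and 10 second completion waits with the driver-wide 180 second storvsc_timeout, so the host is not declared failed before it has genuinely stopped responding; the multiplication by HZ converts seconds to jiffies, which is what the wait takes. A minimal kernel-style sketch of the pattern (the request type is the driver's own; the helper name is illustrative):

/* Wait for the VSP response using the shared timeout (seconds -> jiffies). */
static int wait_for_vsp(struct storvsc_cmd_request *request)
{
	unsigned long t;

	t = wait_for_completion_timeout(&request->wait_event,
					storvsc_timeout * HZ);
	if (t == 0)
		return -ETIMEDOUT;

	return 0;
}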
diff --git a/drivers/spi/spi-loongson-core.c b/drivers/spi/spi-loongson-core.c
index 4fec226456d1..b46f072a0387 100644
--- a/drivers/spi/spi-loongson-core.c
+++ b/drivers/spi/spi-loongson-core.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
diff --git a/drivers/spi/spi-offload.c b/drivers/spi/spi-offload.c
index e674097bf3be..d336f4d228d5 100644
--- a/drivers/spi/spi-offload.c
+++ b/drivers/spi/spi-offload.c
@@ -297,7 +297,7 @@ int spi_offload_trigger_enable(struct spi_offload *offload,
if (trigger->ops->enable) {
ret = trigger->ops->enable(trigger, config);
if (ret) {
- if (offload->ops->trigger_disable)
+ if (offload->ops && offload->ops->trigger_disable)
offload->ops->trigger_disable(offload);
return ret;
}
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 29c616e2c408..70bb74b3bd9c 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -134,6 +134,7 @@ struct omap2_mcspi {
size_t max_xfer_len;
u32 ref_clk_hz;
bool use_multi_mode;
+ bool last_msg_kept_cs;
};
struct omap2_mcspi_cs {
@@ -1269,6 +1270,10 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
* multi-mode is applicable.
*/
mcspi->use_multi_mode = true;
+
+ if (mcspi->last_msg_kept_cs)
+ mcspi->use_multi_mode = false;
+
list_for_each_entry(tr, &msg->transfers, transfer_list) {
if (!tr->bits_per_word)
bits_per_word = msg->spi->bits_per_word;
@@ -1287,18 +1292,19 @@ static int omap2_mcspi_prepare_message(struct spi_controller *ctlr,
mcspi->use_multi_mode = false;
}
- /* Check if transfer asks to change the CS status after the transfer */
- if (!tr->cs_change)
- mcspi->use_multi_mode = false;
-
- /*
- * If at least one message is not compatible, switch back to single mode
- *
- * The bits_per_word of certain transfer can be different, but it will have no
- * impact on the signal itself.
- */
- if (!mcspi->use_multi_mode)
- break;
+ if (list_is_last(&tr->transfer_list, &msg->transfers)) {
+ /* Check if transfer asks to keep the CS status after the whole message */
+ if (tr->cs_change) {
+ mcspi->use_multi_mode = false;
+ mcspi->last_msg_kept_cs = true;
+ } else {
+ mcspi->last_msg_kept_cs = false;
+ }
+ } else {
+ /* Check if transfer asks to change the CS status after the transfer */
+ if (!tr->cs_change)
+ mcspi->use_multi_mode = false;
+ }
}
omap2_mcspi_set_mode(ctlr);
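
The reworked loop above encodes two rules: the controller may stay in multi-channel mode only if chip select toggles after every transfer except possibly the last one, and if the previous message left CS asserted the next message must fall back to single mode as well. A standalone sketch of that per-message decision over a simplified transfer list:

#include <stdbool.h>
#include <stdio.h>

struct xfer { bool cs_change; };

/* Returns true when the message may use multi-channel mode. */
static bool can_use_multi_mode(const struct xfer *x, int n, bool *last_kept_cs)
{
	bool multi = !*last_kept_cs;	/* previous message left CS asserted? */
	int i;

	for (i = 0; i < n; i++) {
		if (i == n - 1) {
			/* Last transfer: cs_change means CS stays asserted afterwards. */
			if (x[i].cs_change) {
				multi = false;
				*last_kept_cs = true;
			} else {
				*last_kept_cs = false;
			}
		} else if (!x[i].cs_change) {
			/* Intermediate transfer keeps CS asserted: single mode only. */
			multi = false;
		}
	}
	return multi;
}

int main(void)
{
	struct xfer msg[] = { { .cs_change = true }, { .cs_change = false } };
	bool last_kept_cs = false;

	printf("multi mode: %d\n", can_use_multi_mode(msg, 2, &last_kept_cs));	/* 1 */
	return 0;
}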
diff --git a/drivers/spi/spi-pci1xxxx.c b/drivers/spi/spi-pci1xxxx.c
index c6489e90b8b9..e27642c4dea4 100644
--- a/drivers/spi/spi-pci1xxxx.c
+++ b/drivers/spi/spi-pci1xxxx.c
@@ -762,10 +762,10 @@ static int pci1xxxx_spi_probe(struct pci_dev *pdev, const struct pci_device_id *
return -EINVAL;
num_vector = pci_alloc_irq_vectors(pdev, 1, hw_inst_cnt,
- PCI_IRQ_ALL_TYPES);
+ PCI_IRQ_INTX | PCI_IRQ_MSI);
if (num_vector < 0) {
dev_err(&pdev->dev, "Error allocating MSI vectors\n");
- return ret;
+ return num_vector;
}
init_completion(&spi_sub_ptr->spi_xfer_done);
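
Two fixes sit in the hunk above: the vector request is narrowed to INTx or MSI (presumably because MSI-X is not wanted here), and on failure the function now propagates the negative value returned by the allocator instead of a stale ret. A minimal kernel-style sketch of that error handling, with an illustrative wrapper name:

/* Sketch: allocate INTx/MSI vectors and propagate the allocator's error. */
static int request_vectors(struct pci_dev *pdev, unsigned int hw_inst_cnt)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, hw_inst_cnt,
				     PCI_IRQ_INTX | PCI_IRQ_MSI);
	if (nvec < 0) {
		dev_err(&pdev->dev, "Error allocating MSI vectors\n");
		return nvec;	/* not a stale value from an earlier call */
	}

	return nvec;		/* number of vectors actually granted */
}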
diff --git a/drivers/spi/spi-stm32-ospi.c b/drivers/spi/spi-stm32-ospi.c
index 7c1fa55fbc47..4ab7e86f4bd5 100644
--- a/drivers/spi/spi-stm32-ospi.c
+++ b/drivers/spi/spi-stm32-ospi.c
@@ -804,7 +804,7 @@ static int stm32_ospi_get_resources(struct platform_device *pdev)
return ret;
}
- ospi->rstc = devm_reset_control_array_get_exclusive(dev);
+ ospi->rstc = devm_reset_control_array_get_exclusive_released(dev);
if (IS_ERR(ospi->rstc))
return dev_err_probe(dev, PTR_ERR(ospi->rstc),
"Can't get reset\n");
@@ -936,12 +936,16 @@ static int stm32_ospi_probe(struct platform_device *pdev)
if (ret < 0)
goto err_pm_enable;
- if (ospi->rstc) {
- reset_control_assert(ospi->rstc);
- udelay(2);
- reset_control_deassert(ospi->rstc);
+ ret = reset_control_acquire(ospi->rstc);
+ if (ret) {
+ dev_err_probe(dev, ret, "Can not acquire reset %d\n", ret);
+ goto err_pm_resume;
}
+ reset_control_assert(ospi->rstc);
+ udelay(2);
+ reset_control_deassert(ospi->rstc);
+
ret = spi_register_controller(ctrl);
if (ret) {
/* Disable ospi */
@@ -987,6 +991,8 @@ static void stm32_ospi_remove(struct platform_device *pdev)
if (ospi->dma_chrx)
dma_release_channel(ospi->dma_chrx);
+ reset_control_release(ospi->rstc);
+
pm_runtime_put_sync_suspend(ospi->dev);
pm_runtime_force_suspend(ospi->dev);
}
@@ -997,6 +1003,8 @@ static int __maybe_unused stm32_ospi_suspend(struct device *dev)
pinctrl_pm_select_sleep_state(dev);
+ reset_control_release(ospi->rstc);
+
return pm_runtime_force_suspend(ospi->dev);
}
@@ -1016,6 +1024,12 @@ static int __maybe_unused stm32_ospi_resume(struct device *dev)
if (ret < 0)
return ret;
+ ret = reset_control_acquire(ospi->rstc);
+ if (ret) {
+ dev_err(dev, "Can not acquire reset\n");
+ return ret;
+ }
+
writel_relaxed(ospi->cr_reg, regs_base + OSPI_CR);
writel_relaxed(ospi->dcr_reg, regs_base + OSPI_DCR1);
pm_runtime_mark_last_busy(ospi->dev);
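
Switching to devm_reset_control_array_get_exclusive_released() above means the reset line is exclusive only while explicitly held: the driver must acquire it before asserting and release it again when idle, which is why probe, suspend/resume and remove all gain acquire/release calls. A minimal kernel-style sketch of that discipline, assuming the released-reset API and with error handling trimmed:

/* Acquire the temporarily-exclusive reset, pulse it, keep it while active. */
static int pulse_reset(struct device *dev, struct reset_control *rstc)
{
	int ret;

	ret = reset_control_acquire(rstc);	/* claim it from other users */
	if (ret)
		return dev_err_probe(dev, ret, "cannot acquire reset\n");

	reset_control_assert(rstc);
	udelay(2);
	reset_control_deassert(rstc);

	return 0;	/* call reset_control_release() once the device goes idle */
}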
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 4410e7d93b7d..f62d89c8e580 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -6623,9 +6623,14 @@ static void ufshcd_err_handler(struct work_struct *work)
up(&hba->host_sem);
return;
}
- ufshcd_set_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
+
ufshcd_err_handling_prepare(hba);
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba, false);
spin_lock_irqsave(hba->host->host_lock, flags);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 5df981920a94..c3da6c0bfca6 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -61,6 +61,23 @@ config HDMI
endif # HAS_IOMEM
+config FIRMWARE_EDID
+ bool "Enable firmware EDID"
+ depends on X86
+ help
+ This enables access to the EDID transferred from the firmware.
+ On x86, this is from the VESA BIOS. DRM display drivers will
+ be able to export the information to userspace.
+
+ Also enable this if DDC/I2C transfers do not work for your driver
+ and if you are using nvidiafb, i810fb or savagefb.
+
+ In general, choosing Y for this option is safe. If you
+ experience extremely long delays while booting before you get
+ something on your display, try setting this to N. Matrox cards in
+ combination with certain motherboards and monitors are known to
+ suffer from this problem.
+
if VT
source "drivers/video/console/Kconfig"
endif
@@ -70,5 +87,4 @@ if FB_CORE || SGI_NEWPORT_CONSOLE
endif
-
endmenu
diff --git a/drivers/video/fbdev/c2p_iplan2.c b/drivers/video/fbdev/c2p_iplan2.c
index cfd2361f24b1..ee4b315d3f40 100644
--- a/drivers/video/fbdev/c2p_iplan2.c
+++ b/drivers/video/fbdev/c2p_iplan2.c
@@ -8,6 +8,7 @@
* for more details.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/string.h>
diff --git a/drivers/video/fbdev/c2p_planar.c b/drivers/video/fbdev/c2p_planar.c
index 819c82a98ac0..236aad5137ef 100644
--- a/drivers/video/fbdev/c2p_planar.c
+++ b/drivers/video/fbdev/c2p_planar.c
@@ -8,6 +8,7 @@
* for more details.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/string.h>
diff --git a/drivers/video/fbdev/core/Kconfig b/drivers/video/fbdev/core/Kconfig
index 4abe12db7594..b38c3b776bce 100644
--- a/drivers/video/fbdev/core/Kconfig
+++ b/drivers/video/fbdev/core/Kconfig
@@ -10,21 +10,6 @@ config FB_CORE
config FB_NOTIFY
bool
-config FIRMWARE_EDID
- bool "Enable firmware EDID"
- depends on FB
- help
- This enables access to the EDID transferred from the firmware.
- On the i386, this is from the Video BIOS. Enable this if DDC/I2C
- transfers do not work for your driver and if you are using
- nvidiafb, i810fb or savagefb.
-
- In general, choosing Y for this option is safe. If you
- experience extremely long delays while booting before you get
- something on your display, try setting this to N. Matrox cards in
- combination with certain motherboards and monitors are known to
- suffer from this problem.
-
config FB_DEVICE
bool "Provide legacy /dev/fb* device"
depends on FB_CORE
diff --git a/drivers/video/fbdev/core/cfbcopyarea.c b/drivers/video/fbdev/core/cfbcopyarea.c
index 23fbf3a8df7c..ce2e6807be60 100644
--- a/drivers/video/fbdev/core/cfbcopyarea.c
+++ b/drivers/video/fbdev/core/cfbcopyarea.c
@@ -2,6 +2,8 @@
/*
* Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
*/
+
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/bitrev.h>
diff --git a/drivers/video/fbdev/core/cfbfillrect.c b/drivers/video/fbdev/core/cfbfillrect.c
index 615de89256d5..bd2fbbda10c6 100644
--- a/drivers/video/fbdev/core/cfbfillrect.c
+++ b/drivers/video/fbdev/core/cfbfillrect.c
@@ -2,6 +2,8 @@
/*
* Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
*/
+
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/bitrev.h>
diff --git a/drivers/video/fbdev/core/cfbimgblt.c b/drivers/video/fbdev/core/cfbimgblt.c
index bcec4e32c0e7..e116cd1d8a39 100644
--- a/drivers/video/fbdev/core/cfbimgblt.c
+++ b/drivers/video/fbdev/core/cfbimgblt.c
@@ -2,6 +2,8 @@
/*
* Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
*/
+
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/bitrev.h>
diff --git a/drivers/video/fbdev/core/fb_ddc.c b/drivers/video/fbdev/core/fb_ddc.c
index e25143219862..824796361367 100644
--- a/drivers/video/fbdev/core/fb_ddc.c
+++ b/drivers/video/fbdev/core/fb_ddc.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/i2c-algo-bit.h>
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 4fc93f253e06..8df2e51e3390 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
diff --git a/drivers/video/fbdev/core/fb_io_fops.c b/drivers/video/fbdev/core/fb_io_fops.c
index 3408ff1b2b7a..6ab60fcd0050 100644
--- a/drivers/video/fbdev/core/fb_io_fops.c
+++ b/drivers/video/fbdev/core/fb_io_fops.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/export.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/uaccess.h>
diff --git a/drivers/video/fbdev/core/fb_sys_fops.c b/drivers/video/fbdev/core/fb_sys_fops.c
index a9aa6519a5b3..be96b3b3942e 100644
--- a/drivers/video/fbdev/core/fb_sys_fops.c
+++ b/drivers/video/fbdev/core/fb_sys_fops.c
@@ -9,6 +9,8 @@
* for more details.
*
*/
+
+#include <linux/export.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/uaccess.h>
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index ff09e57f3c38..9cc3e87da14b 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -11,6 +11,7 @@
* more details.
*/
+#include <linux/export.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/fb.h>
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 2df48037688d..25684f5d6523 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -56,6 +56,7 @@
* more details.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 0a26399dbc89..3b779c27c271 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -26,6 +26,8 @@
* for more details.
*
*/
+
+#include <linux/export.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -1482,13 +1484,12 @@ int fb_validate_mode(const struct fb_var_screeninfo *var, struct fb_info *info)
-EINVAL : 0;
}
-#if defined(CONFIG_FIRMWARE_EDID) && defined(CONFIG_X86)
-
/*
* We need to ensure that the EDID block is only returned for
* the primary graphics adapter.
*/
+#if defined(CONFIG_FIRMWARE_EDID)
const unsigned char *fb_firmware_edid(struct device *device)
{
struct pci_dev *dev = NULL;
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index 7196b055f2bd..53a610948c4a 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -11,6 +11,7 @@
* more details.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
diff --git a/drivers/video/fbdev/core/svgalib.c b/drivers/video/fbdev/core/svgalib.c
index 821b89a0a645..d6053af749f6 100644
--- a/drivers/video/fbdev/core/svgalib.c
+++ b/drivers/video/fbdev/core/svgalib.c
@@ -10,6 +10,7 @@
* Some parts are based on David Boucher's viafb (http://davesdomain.org.uk/viafb/)
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
diff --git a/drivers/video/fbdev/core/syscopyarea.c b/drivers/video/fbdev/core/syscopyarea.c
index b634e2d21208..773569bce67c 100644
--- a/drivers/video/fbdev/core/syscopyarea.c
+++ b/drivers/video/fbdev/core/syscopyarea.c
@@ -2,6 +2,8 @@
/*
* Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
*/
+
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/bitrev.h>
diff --git a/drivers/video/fbdev/core/sysfillrect.c b/drivers/video/fbdev/core/sysfillrect.c
index 372ca6a324c2..12eea3e424bb 100644
--- a/drivers/video/fbdev/core/sysfillrect.c
+++ b/drivers/video/fbdev/core/sysfillrect.c
@@ -2,6 +2,8 @@
/*
* Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
*/
+
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/bitrev.h>
diff --git a/drivers/video/fbdev/core/sysimgblt.c b/drivers/video/fbdev/core/sysimgblt.c
index c756cc658b7d..0a5bfd8ad095 100644
--- a/drivers/video/fbdev/core/sysimgblt.c
+++ b/drivers/video/fbdev/core/sysimgblt.c
@@ -2,6 +2,8 @@
/*
* Copyright (C) 2025 Zsolt Kajtar (soci@c64.rulez.org)
*/
+
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/bitrev.h>
diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c
index 986760b90465..5cb5ee517f81 100644
--- a/drivers/video/fbdev/cyber2000fb.c
+++ b/drivers/video/fbdev/cyber2000fb.c
@@ -1089,7 +1089,6 @@ void cyber2000fb_enable_extregs(struct cfb_info *cfb)
cyber2000_grphw(EXT_FUNC_CTL, old, cfb);
}
}
-EXPORT_SYMBOL(cyber2000fb_enable_extregs);
/*
* Disable access to the extended registers
@@ -1109,41 +1108,6 @@ void cyber2000fb_disable_extregs(struct cfb_info *cfb)
else
cfb->func_use_count -= 1;
}
-EXPORT_SYMBOL(cyber2000fb_disable_extregs);
-
-/*
- * Attach a capture/tv driver to the core CyberX0X0 driver.
- */
-int cyber2000fb_attach(struct cyberpro_info *info, int idx)
-{
- if (int_cfb_info != NULL) {
- info->dev = int_cfb_info->fb.device;
-#ifdef CONFIG_FB_CYBER2000_I2C
- info->i2c = &int_cfb_info->i2c_adapter;
-#else
- info->i2c = NULL;
-#endif
- info->regs = int_cfb_info->regs;
- info->irq = int_cfb_info->irq;
- info->fb = int_cfb_info->fb.screen_base;
- info->fb_size = int_cfb_info->fb.fix.smem_len;
- info->info = int_cfb_info;
-
- strscpy(info->dev_name, int_cfb_info->fb.fix.id,
- sizeof(info->dev_name));
- }
-
- return int_cfb_info != NULL;
-}
-EXPORT_SYMBOL(cyber2000fb_attach);
-
-/*
- * Detach a capture/tv driver from the core CyberX0X0 driver.
- */
-void cyber2000fb_detach(int idx)
-{
-}
-EXPORT_SYMBOL(cyber2000fb_detach);
#ifdef CONFIG_FB_CYBER2000_DDC
diff --git a/drivers/video/fbdev/cyber2000fb.h b/drivers/video/fbdev/cyber2000fb.h
index 04641aa13acc..21095df8fdb0 100644
--- a/drivers/video/fbdev/cyber2000fb.h
+++ b/drivers/video/fbdev/cyber2000fb.h
@@ -488,7 +488,5 @@ struct cyberpro_info {
* Note! Writing to the Cyber20x0 registers from an interrupt
* routine is definitely a bad idea atm.
*/
-int cyber2000fb_attach(struct cyberpro_info *info, int idx);
-void cyber2000fb_detach(int idx);
void cyber2000fb_enable_extregs(struct cfb_info *cfb);
void cyber2000fb_disable_extregs(struct cfb_info *cfb);
diff --git a/drivers/video/fbdev/macmodes.c b/drivers/video/fbdev/macmodes.c
index d6be3c67d3df..b16a9d9bef98 100644
--- a/drivers/video/fbdev/macmodes.c
+++ b/drivers/video/fbdev/macmodes.c
@@ -16,6 +16,7 @@
*/
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/fb.h>
#include <linux/string.h>
#include <linux/module.h>
@@ -236,7 +237,7 @@ int mac_vmode_to_var(int vmode, int cmode, struct fb_var_screeninfo *var)
case CMODE_8:
var->bits_per_pixel = 8;
var->red.offset = 0;
- var->red.length = 8;
+ var->red.length = 8;
var->green.offset = 0;
var->green.length = 8;
var->blue.offset = 0;
diff --git a/drivers/video/fbdev/matrox/g450_pll.c b/drivers/video/fbdev/matrox/g450_pll.c
index ff8e321a22ce..e2c1478aa47f 100644
--- a/drivers/video/fbdev/matrox/g450_pll.c
+++ b/drivers/video/fbdev/matrox/g450_pll.c
@@ -14,6 +14,8 @@
*
*/
+#include <linux/export.h>
+
#include "g450_pll.h"
#include "matroxfb_DAC1064.h"
@@ -258,13 +260,13 @@ static inline unsigned int g450_findworkingpll(struct matrox_fb_info *minfo,
unsigned int found = 0;
unsigned int idx;
unsigned int mnpfound = mnparray[0];
-
+
for (idx = 0; idx < mnpcount; idx++) {
unsigned int sarray[3];
unsigned int *sptr;
{
unsigned int mnp;
-
+
sptr = sarray;
mnp = mnparray[idx];
if (mnp & 0x38) {
@@ -277,7 +279,7 @@ static inline unsigned int g450_findworkingpll(struct matrox_fb_info *minfo,
}
while (sptr >= sarray) {
unsigned int mnp = *sptr--;
-
+
if (g450_testpll(minfo, mnp - 0x0300, pll) &&
g450_testpll(minfo, mnp + 0x0300, pll) &&
g450_testpll(minfo, mnp - 0x0200, pll) &&
@@ -310,12 +312,12 @@ static int g450_checkcache(struct matrox_fb_info *minfo,
struct matrox_pll_cache *ci, unsigned int mnp_key)
{
unsigned int i;
-
+
mnp_key &= G450_MNP_FREQBITS;
for (i = 0; i < ci->valid; i++) {
if (ci->data[i].mnp_key == mnp_key) {
unsigned int mnp;
-
+
mnp = ci->data[i].mnp_value;
if (i) {
memmove(ci->data + 1, ci->data, i * sizeof(*ci->data));
@@ -343,7 +345,7 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
{
u_int8_t tmp, xpwrctrl;
unsigned long flags;
-
+
matroxfb_DAC_lock_irqsave(flags);
xpwrctrl = matroxfb_DAC_in(minfo, M1064_XPWRCTRL);
@@ -375,7 +377,7 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
}
{
u_int8_t misc;
-
+
misc = mga_inb(M_MISC_REG_READ) & ~0x0C;
switch (pll) {
case M_PIXEL_PLL_A:
@@ -409,13 +411,13 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
u_int8_t tmp;
unsigned int mnp;
unsigned long flags;
-
+
matroxfb_DAC_lock_irqsave(flags);
tmp = matroxfb_DAC_in(minfo, M1064_XPWRCTRL);
if (!(tmp & 2)) {
matroxfb_DAC_out(minfo, M1064_XPWRCTRL, tmp | 2);
}
-
+
mnp = matroxfb_DAC_in(minfo, M1064_XPIXPLLCM) << 16;
mnp |= matroxfb_DAC_in(minfo, M1064_XPIXPLLCN) << 8;
matroxfb_DAC_unlock_irqrestore(flags);
@@ -441,7 +443,7 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
delta = pll_freq_delta(fout, g450_vco2f(mnp, vco));
for (idx = mnpcount; idx > 0; idx--) {
/* == is important; due to nextpll algorithm we get
- sorted equally good frequencies from lower VCO
+ sorted equally good frequencies from lower VCO
frequency to higher - with <= lowest wins, while
with < highest one wins */
if (delta <= deltaarray[idx-1]) {
@@ -472,7 +474,7 @@ static int __g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
{
unsigned long flags;
unsigned int mnp;
-
+
matroxfb_DAC_lock_irqsave(flags);
mnp = g450_checkcache(minfo, ci, mnparray[0]);
if (mnp != NO_MORE_MNP) {
@@ -495,7 +497,7 @@ int matroxfb_g450_setclk(struct matrox_fb_info *minfo, unsigned int fout,
unsigned int pll)
{
unsigned int* arr;
-
+
arr = kmalloc(sizeof(*arr) * MNP_TABLE_SIZE * 2, GFP_KERNEL);
if (arr) {
int r;
diff --git a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
index 398b7035f5a9..9a893b70ab19 100644
--- a/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
+++ b/drivers/video/fbdev/matrox/matroxfb_DAC1064.c
@@ -13,6 +13,7 @@
*
*/
+#include <linux/export.h>
#include "matroxfb_DAC1064.h"
#include "matroxfb_misc.h"
@@ -43,11 +44,11 @@ static void DAC1064_calcclock(const struct matrox_fb_info *minfo,
unsigned int p;
DBG(__func__)
-
+
/* only for devices older than G450 */
fvco = PLL_calcclock(minfo, freq, fmax, in, feed, &p);
-
+
p = (1 << p) - 1;
if (fvco <= 100000)
;
@@ -169,7 +170,7 @@ static void g450_set_plls(struct matrox_fb_info *minfo)
struct matrox_hw_state *hw = &minfo->hw;
int pixelmnp;
int videomnp;
-
+
c2_ctl = hw->crtc2.ctl & ~0x4007; /* Clear PLL + enable for CRTC2 */
c2_ctl |= 0x0001; /* Enable CRTC2 */
hw->DACreg[POS1064_XPWRCTRL] &= ~0x02; /* Stop VIDEO PLL */
@@ -192,7 +193,7 @@ static void g450_set_plls(struct matrox_fb_info *minfo)
}
c2_ctl |= 0x0006; /* Use video PLL */
hw->DACreg[POS1064_XPWRCTRL] |= 0x02;
-
+
outDAC1064(minfo, M1064_XPWRCTRL, hw->DACreg[POS1064_XPWRCTRL]);
matroxfb_g450_setpll_cond(minfo, videomnp, M_VIDEO_PLL);
}
@@ -200,7 +201,7 @@ static void g450_set_plls(struct matrox_fb_info *minfo)
hw->DACreg[POS1064_XPIXCLKCTRL] &= ~M1064_XPIXCLKCTRL_PLL_UP;
if (pixelmnp >= 0) {
hw->DACreg[POS1064_XPIXCLKCTRL] |= M1064_XPIXCLKCTRL_PLL_UP;
-
+
outDAC1064(minfo, M1064_XPIXCLKCTRL, hw->DACreg[POS1064_XPIXCLKCTRL]);
matroxfb_g450_setpll_cond(minfo, pixelmnp, M_PIXEL_PLL_C);
}
@@ -303,9 +304,9 @@ void DAC1064_global_init(struct matrox_fb_info *minfo)
poweroff TMDS. But if we boot with DFP connected,
TMDS generated clocks are used instead of ALL pixclocks
available... If someone knows which register
- handles it, please reveal this secret to me... */
+ handles it, please reveal this secret to me... */
hw->DACreg[POS1064_XPWRCTRL] &= ~0x04; /* Poweroff TMDS */
-#endif
+#endif
break;
}
/* Now set timming related variables... */
@@ -728,14 +729,14 @@ static void g450_mclk_init(struct matrox_fb_info *minfo)
} else {
unsigned long flags;
unsigned int pwr;
-
+
matroxfb_DAC_lock_irqsave(flags);
pwr = inDAC1064(minfo, M1064_XPWRCTRL) & ~0x02;
outDAC1064(minfo, M1064_XPWRCTRL, pwr);
matroxfb_DAC_unlock_irqrestore(flags);
}
matroxfb_g450_setclk(minfo, minfo->values.pll.system, M_SYSTEM_PLL);
-
+
/* switch clocks to their real PLL source(s) */
pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg | 4);
pci_write_config_dword(minfo->pcidev, PCI_OPTION3_REG, minfo->values.reg.opt3);
@@ -748,15 +749,15 @@ static void g450_memory_init(struct matrox_fb_info *minfo)
/* disable memory refresh */
minfo->hw.MXoptionReg &= ~0x001F8000;
pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);
-
+
/* set memory interface parameters */
minfo->hw.MXoptionReg &= ~0x00207E00;
minfo->hw.MXoptionReg |= 0x00207E00 & minfo->values.reg.opt;
pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);
pci_write_config_dword(minfo->pcidev, PCI_OPTION2_REG, minfo->values.reg.opt2);
-
+
mga_outl(M_CTLWTST, minfo->values.reg.mctlwtst);
-
+
/* first set up memory interface with disabled memory interface clocks */
pci_write_config_dword(minfo->pcidev, PCI_MEMMISC_REG, minfo->values.reg.memmisc & ~0x80000000U);
mga_outl(M_MEMRDBK, minfo->values.reg.memrdbk);
@@ -765,25 +766,25 @@ static void g450_memory_init(struct matrox_fb_info *minfo)
pci_write_config_dword(minfo->pcidev, PCI_MEMMISC_REG, minfo->values.reg.memmisc | 0x80000000U);
udelay(200);
-
+
if (minfo->values.memory.ddr && (!minfo->values.memory.emrswen || !minfo->values.memory.dll)) {
mga_outl(M_MEMRDBK, minfo->values.reg.memrdbk & ~0x1000);
}
mga_outl(M_MACCESS, minfo->values.reg.maccess | 0x8000);
-
+
udelay(200);
-
+
minfo->hw.MXoptionReg |= 0x001F8000 & minfo->values.reg.opt;
pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);
-
+
/* value is written to memory chips only if old != new */
mga_outl(M_PLNWT, 0);
mga_outl(M_PLNWT, ~0);
-
+
if (minfo->values.reg.mctlwtst != minfo->values.reg.mctlwtst_core) {
mga_outl(M_CTLWTST, minfo->values.reg.mctlwtst_core);
}
-
+
}
static void g450_preinit(struct matrox_fb_info *minfo)
@@ -791,7 +792,7 @@ static void g450_preinit(struct matrox_fb_info *minfo)
u_int32_t c2ctl;
u_int8_t curctl;
u_int8_t c1ctl;
-
+
/* minfo->hw.MXoptionReg = minfo->values.reg.opt; */
minfo->hw.MXoptionReg &= 0xC0000100;
minfo->hw.MXoptionReg |= 0x00000020;
@@ -805,7 +806,7 @@ static void g450_preinit(struct matrox_fb_info *minfo)
pci_write_config_dword(minfo->pcidev, PCI_OPTION_REG, minfo->hw.MXoptionReg);
/* Init system clocks */
-
+
/* stop crtc2 */
c2ctl = mga_inl(M_C2CTL);
mga_outl(M_C2CTL, c2ctl & ~1);
@@ -818,20 +819,20 @@ static void g450_preinit(struct matrox_fb_info *minfo)
g450_mclk_init(minfo);
g450_memory_init(minfo);
-
+
/* set legacy VGA clock sources for DOSEmu or VMware... */
matroxfb_g450_setclk(minfo, 25175, M_PIXEL_PLL_A);
matroxfb_g450_setclk(minfo, 28322, M_PIXEL_PLL_B);
/* restore crtc1 */
mga_setr(M_SEQ_INDEX, 1, c1ctl);
-
+
/* restore cursor */
outDAC1064(minfo, M1064_XCURCTRL, curctl);
/* restore crtc2 */
mga_outl(M_C2CTL, c2ctl);
-
+
return;
}
diff --git a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
index f53b8066e8a5..4eb636cd1f89 100644
--- a/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
+++ b/drivers/video/fbdev/matrox/matroxfb_Ti3026.c
@@ -79,6 +79,7 @@
*
*/
+#include <linux/export.h>
#include "matroxfb_Ti3026.h"
#include "matroxfb_misc.h"
diff --git a/drivers/video/fbdev/matrox/matroxfb_accel.c b/drivers/video/fbdev/matrox/matroxfb_accel.c
index 52e15dc6f45b..2670db392da2 100644
--- a/drivers/video/fbdev/matrox/matroxfb_accel.c
+++ b/drivers/video/fbdev/matrox/matroxfb_accel.c
@@ -77,6 +77,8 @@
*
*/
+#include <linux/export.h>
+
#include "matroxfb_accel.h"
#include "matroxfb_DAC1064.h"
#include "matroxfb_Ti3026.h"
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 81603ce05a22..5be0cdcd7c71 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -101,6 +101,7 @@
*/
#include <linux/aperture.h>
+#include <linux/export.h>
#include <linux/version.h>
#include "matroxfb_base.h"
diff --git a/drivers/video/fbdev/matrox/matroxfb_g450.c b/drivers/video/fbdev/matrox/matroxfb_g450.c
index df3309fd14f3..800c05b70ee3 100644
--- a/drivers/video/fbdev/matrox/matroxfb_g450.c
+++ b/drivers/video/fbdev/matrox/matroxfb_g450.c
@@ -13,6 +13,8 @@
*
*/
+#include <linux/export.h>
+
#include "matroxfb_base.h"
#include "matroxfb_misc.h"
#include "matroxfb_DAC1064.h"
@@ -32,29 +34,29 @@ struct mctl {
#define WLMAX 0x3FF
static const struct mctl g450_controls[] =
-{ { { V4L2_CID_BRIGHTNESS, V4L2_CTRL_TYPE_INTEGER,
+{ { { V4L2_CID_BRIGHTNESS, V4L2_CTRL_TYPE_INTEGER,
"brightness",
- 0, WLMAX-BLMIN, 1, 370-BLMIN,
+ 0, WLMAX-BLMIN, 1, 370-BLMIN,
0,
}, offsetof(struct matrox_fb_info, altout.tvo_params.brightness) },
- { { V4L2_CID_CONTRAST, V4L2_CTRL_TYPE_INTEGER,
+ { { V4L2_CID_CONTRAST, V4L2_CTRL_TYPE_INTEGER,
"contrast",
- 0, 1023, 1, 127,
+ 0, 1023, 1, 127,
0,
}, offsetof(struct matrox_fb_info, altout.tvo_params.contrast) },
{ { V4L2_CID_SATURATION, V4L2_CTRL_TYPE_INTEGER,
"saturation",
- 0, 255, 1, 165,
+ 0, 255, 1, 165,
0,
}, offsetof(struct matrox_fb_info, altout.tvo_params.saturation) },
{ { V4L2_CID_HUE, V4L2_CTRL_TYPE_INTEGER,
"hue",
- 0, 255, 1, 0,
+ 0, 255, 1, 0,
0,
}, offsetof(struct matrox_fb_info, altout.tvo_params.hue) },
{ { MATROXFB_CID_TESTOUT, V4L2_CTRL_TYPE_BOOLEAN,
"test output",
- 0, 1, 1, 0,
+ 0, 1, 1, 0,
0,
}, offsetof(struct matrox_fb_info, altout.tvo_params.testout) },
};
@@ -89,7 +91,7 @@ static inline int *get_ctrl_ptr(struct matrox_fb_info *minfo, unsigned int idx)
static void tvo_fill_defaults(struct matrox_fb_info *minfo)
{
unsigned int i;
-
+
for (i = 0; i < G450CTRLS; i++) {
*get_ctrl_ptr(minfo, i) = g450_controls[i].desc.default_value;
}
@@ -99,7 +101,7 @@ static int cve2_get_reg(struct matrox_fb_info *minfo, int reg)
{
unsigned long flags;
int val;
-
+
matroxfb_DAC_lock_irqsave(flags);
matroxfb_DAC_out(minfo, 0x87, reg);
val = matroxfb_DAC_in(minfo, 0x88);
@@ -141,16 +143,16 @@ static void g450_compute_bwlevel(const struct matrox_fb_info *minfo, int *bl,
static int g450_query_ctrl(void* md, struct v4l2_queryctrl *p) {
int i;
-
+
i = get_ctrl_id(p->id);
if (i >= 0) {
*p = g450_controls[i].desc;
return 0;
}
if (i == -ENOENT) {
- static const struct v4l2_queryctrl disctrl =
+ static const struct v4l2_queryctrl disctrl =
{ .flags = V4L2_CTRL_FLAG_DISABLED };
-
+
i = p->id;
*p = disctrl;
p->id = i;
@@ -163,7 +165,7 @@ static int g450_query_ctrl(void* md, struct v4l2_queryctrl *p) {
static int g450_set_ctrl(void* md, struct v4l2_control *p) {
int i;
struct matrox_fb_info *minfo = md;
-
+
i = get_ctrl_id(p->id);
if (i < 0) return -EINVAL;
@@ -209,7 +211,7 @@ static int g450_set_ctrl(void* md, struct v4l2_control *p) {
}
break;
}
-
+
return 0;
}
@@ -217,7 +219,7 @@ static int g450_set_ctrl(void* md, struct v4l2_control *p) {
static int g450_get_ctrl(void* md, struct v4l2_control *p) {
int i;
struct matrox_fb_info *minfo = md;
-
+
i = get_ctrl_id(p->id);
if (i < 0) return -EINVAL;
p->value = *get_ctrl_ptr(minfo, i);
@@ -247,22 +249,22 @@ static void computeRegs(struct matrox_fb_info *minfo, struct mavenregs *r,
unsigned long long piic;
int mnp;
int over;
-
+
r->regs[0x80] = 0x03; /* | 0x40 for SCART */
hvis = ((mt->HDisplay << 1) + 3) & ~3;
-
+
if (hvis >= 2048) {
hvis = 2044;
}
-
+
piic = 1000000000ULL * hvis;
do_div(piic, outd->h_vis);
dprintk(KERN_DEBUG "Want %u kHz pixclock\n", (unsigned int)piic);
-
+
mnp = matroxfb_g450_setclk(minfo, piic, M_VIDEO_PLL);
-
+
mt->mnp = mnp;
mt->pixclock = g450_mnp2f(minfo, mnp);
@@ -275,7 +277,7 @@ static void computeRegs(struct matrox_fb_info *minfo, struct mavenregs *r,
piic = outd->chromasc;
do_div(piic, mt->pixclock);
chromasc = piic;
-
+
dprintk(KERN_DEBUG "Chroma is %08X\n", chromasc);
r->regs[0] = piic >> 24;
@@ -287,7 +289,7 @@ static void computeRegs(struct matrox_fb_info *minfo, struct mavenregs *r,
hsl = (((outd->h_sync + pixclock) / pixclock)) & ~1;
hlen = hvis + hfp + hsl + hbp;
over = hlen & 0x0F;
-
+
dprintk(KERN_DEBUG "WL: vis=%u, hf=%u, hs=%u, hb=%u, total=%u\n", hvis, hfp, hsl, hbp, hlen);
if (over) {
@@ -310,14 +312,14 @@ static void computeRegs(struct matrox_fb_info *minfo, struct mavenregs *r,
r->regs[0x2C] = hfp;
r->regs[0x31] = hvis / 8;
r->regs[0x32] = hvis & 7;
-
+
dprintk(KERN_DEBUG "PG: vis=%04X, hf=%02X, hs=%02X, hb=%02X, total=%04X\n", hvis, hfp, hsl, hbp, hlen);
r->regs[0x84] = 1; /* x sync point */
r->regs[0x85] = 0;
hvis = hvis >> 1;
hlen = hlen >> 1;
-
+
dprintk(KERN_DEBUG "hlen=%u hvis=%u\n", hlen, hvis);
mt->interlaced = 1;
@@ -332,13 +334,13 @@ static void computeRegs(struct matrox_fb_info *minfo, struct mavenregs *r,
unsigned int vtotal;
unsigned int vsyncend;
unsigned int vdisplay;
-
+
vtotal = mt->VTotal;
vsyncend = mt->VSyncEnd;
vdisplay = mt->VDisplay;
if (vtotal < outd->v_total) {
unsigned int yovr = outd->v_total - vtotal;
-
+
vsyncend += yovr >> 1;
} else if (vtotal > outd->v_total) {
vdisplay = outd->v_total - 4;
@@ -350,7 +352,7 @@ static void computeRegs(struct matrox_fb_info *minfo, struct mavenregs *r,
r->regs[0x33] = upper - 1; /* upper blanking */
r->regs[0x82] = upper; /* y sync point */
r->regs[0x83] = upper >> 8;
-
+
mt->VDisplay = vdisplay;
mt->VSyncStart = outd->v_total - 2;
mt->VSyncEnd = outd->v_total;
@@ -509,9 +511,9 @@ static void cve2_init_TV(struct matrox_fb_info *minfo,
LR(0x80);
LR(0x82); LR(0x83);
LR(0x84); LR(0x85);
-
+
cve2_set_reg(minfo, 0x3E, 0x01);
-
+
for (i = 0; i < 0x3E; i++) {
LR(i);
}
@@ -558,7 +560,7 @@ static int matroxfb_g450_compute(void* md, struct my_timming* mt) {
static int matroxfb_g450_program(void* md) {
struct matrox_fb_info *minfo = md;
-
+
if (minfo->outputs[1].mode != MATROXFB_OUTPUT_MODE_MONITOR) {
cve2_init_TV(minfo, &minfo->hw.maven);
}
diff --git a/drivers/video/fbdev/matrox/matroxfb_misc.c b/drivers/video/fbdev/matrox/matroxfb_misc.c
index 8f159a2ad8d0..2c5f0099532b 100644
--- a/drivers/video/fbdev/matrox/matroxfb_misc.c
+++ b/drivers/video/fbdev/matrox/matroxfb_misc.c
@@ -85,6 +85,7 @@
*
*/
+#include <linux/export.h>
#include "matroxfb_misc.h"
#include <linux/interrupt.h>
@@ -390,7 +391,7 @@ void matroxfb_vgaHWrestore(struct matrox_fb_info *minfo)
static void get_pins(unsigned char __iomem* pins, struct matrox_bios* bd) {
unsigned int b0 = readb(pins);
-
+
if (b0 == 0x2E && readb(pins+1) == 0x41) {
unsigned int pins_len = readb(pins+2);
unsigned int i;
@@ -426,7 +427,7 @@ static void get_pins(unsigned char __iomem* pins, struct matrox_bios* bd) {
static void get_bios_version(unsigned char __iomem * vbios, struct matrox_bios* bd) {
unsigned int pcir_offset;
-
+
pcir_offset = readb(vbios + 24) | (readb(vbios + 25) << 8);
if (pcir_offset >= 26 && pcir_offset < 0xFFE0 &&
readb(vbios + pcir_offset ) == 'P' &&
@@ -451,7 +452,7 @@ static void get_bios_version(unsigned char __iomem * vbios, struct matrox_bios*
static void get_bios_output(unsigned char __iomem* vbios, struct matrox_bios* bd) {
unsigned char b;
-
+
b = readb(vbios + 0x7FF1);
if (b == 0xFF) {
b = 0;
@@ -461,7 +462,7 @@ static void get_bios_output(unsigned char __iomem* vbios, struct matrox_bios* bd
static void get_bios_tvout(unsigned char __iomem* vbios, struct matrox_bios* bd) {
unsigned int i;
-
+
/* Check for 'IBM .*(V....TVO' string - it means TVO BIOS */
bd->output.tvout = 0;
if (readb(vbios + 0x1D) != 'I' ||
@@ -472,7 +473,7 @@ static void get_bios_tvout(unsigned char __iomem* vbios, struct matrox_bios* bd)
}
for (i = 0x2D; i < 0x2D + 128; i++) {
unsigned char b = readb(vbios + i);
-
+
if (b == '(' && readb(vbios + i + 1) == 'V') {
if (readb(vbios + i + 6) == 'T' &&
readb(vbios + i + 7) == 'V' &&
@@ -488,7 +489,7 @@ static void get_bios_tvout(unsigned char __iomem* vbios, struct matrox_bios* bd)
static void parse_bios(unsigned char __iomem* vbios, struct matrox_bios* bd) {
unsigned int pins_offset;
-
+
if (readb(vbios) != 0x55 || readb(vbios + 1) != 0xAA) {
return;
}
@@ -648,9 +649,9 @@ static int parse_pins5(struct matrox_fb_info *minfo,
const struct matrox_bios *bd)
{
unsigned int mult;
-
+
mult = bd->pins[4]?8000:6000;
-
+
minfo->limits.pixel.vcomax = (bd->pins[ 38] == 0xFF) ? 600000 : bd->pins[ 38] * mult;
minfo->limits.system.vcomax = (bd->pins[ 36] == 0xFF) ? minfo->limits.pixel.vcomax : bd->pins[ 36] * mult;
minfo->limits.video.vcomax = (bd->pins[ 37] == 0xFF) ? minfo->limits.system.vcomax : bd->pins[ 37] * mult;
@@ -770,7 +771,7 @@ void matroxfb_read_pins(struct matrox_fb_info *minfo)
u32 biosbase;
u32 fbbase;
struct pci_dev *pdev = minfo->pcidev;
-
+
memset(&minfo->bios, 0, sizeof(minfo->bios));
pci_read_config_dword(pdev, PCI_OPTION_REG, &opt);
pci_write_config_dword(pdev, PCI_OPTION_REG, opt | PCI_OPTION_ENABLE_ROM);
@@ -790,7 +791,7 @@ void matroxfb_read_pins(struct matrox_fb_info *minfo)
} else {
unsigned int ven = readb(b+0x64+0) | (readb(b+0x64+1) << 8);
unsigned int dev = readb(b+0x64+2) | (readb(b+0x64+3) << 8);
-
+
if (ven != pdev->vendor || dev != pdev->device) {
printk(KERN_INFO "matroxfb: Legacy BIOS is for %04X:%04X, while this device is %04X:%04X\n",
ven, dev, pdev->vendor, pdev->device);
diff --git a/drivers/video/fbdev/mb862xx/mb862xx-i2c.c b/drivers/video/fbdev/mb862xx/mb862xx-i2c.c
index ffdb1597d303..08b7f19a67a5 100644
--- a/drivers/video/fbdev/mb862xx/mb862xx-i2c.c
+++ b/drivers/video/fbdev/mb862xx/mb862xx-i2c.c
@@ -9,7 +9,6 @@
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/delay.h>
-#include <linux/export.h>
#include "mb862xxfb.h"
#include "mb862xx_reg.h"
diff --git a/drivers/video/fbdev/omap/lcd_dma.c b/drivers/video/fbdev/omap/lcd_dma.c
index 0da23c57e475..56300be71c57 100644
--- a/drivers/video/fbdev/omap/lcd_dma.c
+++ b/drivers/video/fbdev/omap/lcd_dma.c
@@ -18,6 +18,7 @@
* Support functions for the OMAP internal DMA channels.
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
diff --git a/drivers/video/fbdev/omap/lcdc.c b/drivers/video/fbdev/omap/lcdc.c
index abb8b11464e8..53ca58ec5eed 100644
--- a/drivers/video/fbdev/omap/lcdc.c
+++ b/drivers/video/fbdev/omap/lcdc.c
@@ -5,8 +5,10 @@
* Copyright (C) 2004 Nokia Corporation
* Author: Imre Deak <imre.deak@nokia.com>
*/
+
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/err.h>
diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c
index 2682b20d184a..106d21e74738 100644
--- a/drivers/video/fbdev/omap/omapfb_main.c
+++ b/drivers/video/fbdev/omap/omapfb_main.c
@@ -11,6 +11,8 @@
* Dirk Behme <dirk.behme@de.bosch.com> - changes for 2.6 kernel API
* Texas Instruments - H3 support
*/
+
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/mm.h>
#include <linux/slab.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/apply.c b/drivers/video/fbdev/omap2/omapfb/dss/apply.c
index acca991c7540..39947e569a54 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/apply.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/apply.c
@@ -6,6 +6,7 @@
#define DSS_SUBSYS_NAME "APPLY"
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index 55b640f2f245..02ea41f6c8f4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
index cc2ad787d493..7831c6a2eedb 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
@@ -6,6 +6,7 @@
#define DSS_SUBSYS_NAME "APPLY"
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display.c b/drivers/video/fbdev/omap2/omapfb/dss/display.c
index f91db94c9905..16543425bd84 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display.c
@@ -11,6 +11,7 @@
#define DSS_SUBSYS_NAME "DISPLAY"
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/jiffies.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
index 86ed4c077c30..ad8ae1727966 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
@@ -13,7 +13,6 @@
#include <linux/kernel.h>
#include <linux/delay.h>
-#include <linux/export.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
index 7c636db79882..f90a8eff7259 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
@@ -6,6 +6,7 @@
#include <linux/device.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
index 62c2d48d9e09..38be57ba8c28 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
@@ -6,6 +6,7 @@
* Author: Archit Taneja <archit@ti.com>
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager.c b/drivers/video/fbdev/omap2/omapfb/dss/manager.c
index 2c2da35345d0..c59e5689d6cc 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager.c
@@ -11,6 +11,7 @@
#define DSS_SUBSYS_NAME "MANAGER"
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/output.c b/drivers/video/fbdev/omap2/omapfb/dss/output.c
index 4e2992a0ce50..48cbfb75443f 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/output.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/output.c
@@ -4,6 +4,7 @@
* Author: Archit Taneja <archit@ti.com>
*/
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
index 8c8e627da13d..bbbdc233ee61 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/sysfs.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
index 2d3e5d4467c5..68e569ec0f83 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
@@ -12,7 +12,6 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
-#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/of.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
index f99dda9e55a5..ed283029ad95 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/completion.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index ea8c88aa4477..152dbeaa6451 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -16,7 +16,6 @@
#include <linux/mm.h>
#include <linux/omapfb.h>
#include <linux/vmalloc.h>
-#include <linux/export.h>
#include <linux/sizes.h>
#include <video/omapfb_dss.h>
diff --git a/drivers/video/fbdev/omap2/omapfb/vrfb.c b/drivers/video/fbdev/omap2/omapfb/vrfb.c
index 568e6e1eca62..675482cde519 100644
--- a/drivers/video/fbdev/omap2/omapfb/vrfb.c
+++ b/drivers/video/fbdev/omap2/omapfb/vrfb.c
@@ -9,6 +9,7 @@
/*#define DEBUG*/
#include <linux/err.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ioport.h>
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index ee6da5084242..baf87f34cc24 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -1030,9 +1030,8 @@ static inline unsigned int get_pcd(struct pxafb_info *fbi,
/*
* Some touchscreens need hsync information from the video driver to
- * function correctly. We export it here. Note that 'hsync_time' and
- * the value returned from pxafb_get_hsync_time() is the *reciprocal*
- * of the hsync period in seconds.
+ * function correctly. We export it here. Note that 'hsync_time' is
+ * the *reciprocal* of the hsync period in seconds.
*/
static inline void set_hsync_time(struct pxafb_info *fbi, unsigned int pcd)
{
@@ -1048,18 +1047,6 @@ static inline void set_hsync_time(struct pxafb_info *fbi, unsigned int pcd)
fbi->hsync_time = htime;
}
-unsigned long pxafb_get_hsync_time(struct device *dev)
-{
- struct pxafb_info *fbi = dev_get_drvdata(dev);
-
- /* If display is blanked/suspended, hsync isn't active */
- if (!fbi || (fbi->state != C_ENABLE))
- return 0;
-
- return fbi->hsync_time;
-}
-EXPORT_SYMBOL(pxafb_get_hsync_time);
-
static int setup_frame_dma(struct pxafb_info *fbi, int dma, int pal,
unsigned long start, size_t size)
{
diff --git a/drivers/video/fbdev/sbuslib.c b/drivers/video/fbdev/sbuslib.c
index 4c79654bda30..dd2002d0810f 100644
--- a/drivers/video/fbdev/sbuslib.c
+++ b/drivers/video/fbdev/sbuslib.c
@@ -5,6 +5,7 @@
*/
#include <linux/compat.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
diff --git a/drivers/video/fbdev/sis/sis.h b/drivers/video/fbdev/sis/sis.h
index d632f096083b..3d658482c69d 100644
--- a/drivers/video/fbdev/sis/sis.h
+++ b/drivers/video/fbdev/sis/sis.h
@@ -673,9 +673,7 @@ unsigned int sisfb_read_mio_pci_word(struct SiS_Private *SiS_Pr, int reg);
/* SiS-specific exported functions */
void sis_malloc(struct sis_memreq *req);
-void sis_malloc_new(struct pci_dev *pdev, struct sis_memreq *req);
void sis_free(u32 base);
-void sis_free_new(struct pci_dev *pdev, u32 base);
/* Routines from init.c/init301.c */
extern unsigned short SiS_GetModeID_LCD(int VGAEngine, unsigned int VBFlags, int HDisplay,
diff --git a/drivers/video/fbdev/sis/sis_main.c b/drivers/video/fbdev/sis/sis_main.c
index 75033e6be15a..84567d67f71d 100644
--- a/drivers/video/fbdev/sis/sis_main.c
+++ b/drivers/video/fbdev/sis/sis_main.c
@@ -3421,14 +3421,6 @@ sis_malloc(struct sis_memreq *req)
req->offset = req->size = 0;
}
-void
-sis_malloc_new(struct pci_dev *pdev, struct sis_memreq *req)
-{
- struct sis_video_info *ivideo = pci_get_drvdata(pdev);
-
- sis_int_malloc(ivideo, req);
-}
-
/* sis_free: u32 because "base" is offset inside video ram, can never be >4GB */
static void
@@ -3455,14 +3447,6 @@ sis_free(u32 base)
sis_int_free(ivideo, base);
}
-void
-sis_free_new(struct pci_dev *pdev, u32 base)
-{
- struct sis_video_info *ivideo = pci_get_drvdata(pdev);
-
- sis_int_free(ivideo, base);
-}
-
/* --------------------- SetMode routines ------------------------- */
static void
@@ -6832,12 +6816,3 @@ MODULE_PARM_DESC(videoram,
#endif
#endif /* /MODULE */
-
-/* _GPL only for new symbols. */
-EXPORT_SYMBOL(sis_malloc);
-EXPORT_SYMBOL(sis_free);
-EXPORT_SYMBOL_GPL(sis_malloc_new);
-EXPORT_SYMBOL_GPL(sis_free_new);
-
-
-
diff --git a/drivers/video/fbdev/via/via-core.c b/drivers/video/fbdev/via/via-core.c
index 908524a74a38..a8d4a3e2c65e 100644
--- a/drivers/video/fbdev/via/via-core.c
+++ b/drivers/video/fbdev/via/via-core.c
@@ -9,6 +9,7 @@
* Core code for the Via multifunction framebuffer device.
*/
#include <linux/aperture.h>
+#include <linux/export.h>
#include <linux/via-core.h>
#include <linux/via_i2c.h>
#include "via-gpio.h"
diff --git a/drivers/video/fbdev/via/via-gpio.c b/drivers/video/fbdev/via/via-gpio.c
index 27226a8f3f42..72302384bf77 100644
--- a/drivers/video/fbdev/via/via-gpio.c
+++ b/drivers/video/fbdev/via/via-gpio.c
@@ -10,7 +10,6 @@
#include <linux/gpio/machine.h>
#include <linux/platform_device.h>
#include <linux/via-core.h>
-#include <linux/export.h>
#include "via-gpio.h"
/*
diff --git a/drivers/video/fbdev/via/via_i2c.c b/drivers/video/fbdev/via/via_i2c.c
index cdbd7a9b8817..42c378425586 100644
--- a/drivers/video/fbdev/via/via_i2c.c
+++ b/drivers/video/fbdev/via/via_i2c.c
@@ -7,6 +7,7 @@
#include <linux/platform_device.h>
#include <linux/delay.h>
+#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/via-core.h>
diff --git a/drivers/video/fbdev/wmt_ge_rops.c b/drivers/video/fbdev/wmt_ge_rops.c
index 92fbb3f3a0d3..2bd26bfb2b46 100644
--- a/drivers/video/fbdev/wmt_ge_rops.c
+++ b/drivers/video/fbdev/wmt_ge_rops.c
@@ -7,6 +7,7 @@
* Copyright (C) 2010 Alexey Charkov <alchark@gmail.com>
*/
+#include <linux/export.h>
#include <linux/module.h>
#include <linux/fb.h>
#include <linux/io.h>