96 files changed, 1655 insertions, 1355 deletions
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,tdp158.yaml b/Documentation/devicetree/bindings/display/bridge/ti,tdp158.yaml new file mode 100644 index 000000000000..1c522f72c4ba --- /dev/null +++ b/Documentation/devicetree/bindings/display/bridge/ti,tdp158.yaml @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/bridge/ti,tdp158.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: TI TDP158 HDMI to TMDS Redriver + +maintainers: + - Arnaud Vrac <avrac@freebox.fr> + - Pierre-Hugues Husson <phhusson@freebox.fr> + +properties: + compatible: + const: ti,tdp158 + +# The reg property is required if and only if the device is connected +# to an I2C bus. In pin strap mode, reg must not be specified. + reg: + description: I2C address of the device + +# Pin 36 = Operation Enable / Reset Pin +# OE = L: Power Down Mode +# OE = H: Normal Operation +# Internal weak pullup - device resets on H to L transitions + enable-gpios: + description: GPIO controlling bridge enable + + vcc-supply: + description: Power supply 3.3V + + vdd-supply: + description: Power supply 1.1V + + ports: + $ref: /schemas/graph.yaml#/properties/ports + + properties: + port@0: + $ref: /schemas/graph.yaml#/properties/port + description: Bridge input + + port@1: + $ref: /schemas/graph.yaml#/properties/port + description: Bridge output + + required: + - port@0 + - port@1 + +required: + - compatible + - vcc-supply + - vdd-supply + - ports + +additionalProperties: false diff --git a/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt b/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt index 3c35338a2867..269b1ae2fca9 100644 --- a/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt +++ b/Documentation/devicetree/bindings/display/imx/fsl-imx-drm.txt @@ -119,7 +119,6 @@ Optional properties: - interface-pix-fmt: How this display is connected to the display interface. Currently supported types: "rgb24", "rgb565", "bgr666" and "lvds666". -- edid: verbatim EDID data block describing attached display. - ddc: phandle describing the i2c bus handling the display data channel - port@[0-1]: Port nodes with endpoint definitions as defined in @@ -131,7 +130,6 @@ example: disp0 { compatible = "fsl,imx-parallel-display"; - edid = [edid-data]; interface-pix-fmt = "rgb24"; port@0 { diff --git a/Documentation/devicetree/bindings/display/imx/ldb.txt b/Documentation/devicetree/bindings/display/imx/ldb.txt index 8e6e7d797943..03653a291b54 100644 --- a/Documentation/devicetree/bindings/display/imx/ldb.txt +++ b/Documentation/devicetree/bindings/display/imx/ldb.txt @@ -62,7 +62,6 @@ Required properties: display-timings are used instead. Optional properties (required if display-timings are used): - - ddc-i2c-bus: phandle of an I2C controller used for DDC EDID probing - display-timings : A node that describes the display timings as defined in Documentation/devicetree/bindings/display/panel/display-timing.txt. 
- fsl,data-mapping : should be "spwg" or "jeida" diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml index 278399adc550..735c7f06c24e 100644 --- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml +++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml @@ -26,6 +26,7 @@ properties: - renesas,r9a07g054-mali - rockchip,px30-mali - rockchip,rk3568-mali + - rockchip,rk3576-mali - const: arm,mali-bifrost # Mali Bifrost GPU model/revision is fully discoverable - items: - enum: diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst index 370d820be248..b75cc9a70d1f 100644 --- a/Documentation/gpu/drm-uapi.rst +++ b/Documentation/gpu/drm-uapi.rst @@ -305,13 +305,26 @@ Kernel Mode Driver ------------------ The KMD is responsible for checking if the device needs a reset, and to perform -it as needed. Usually a hang is detected when a job gets stuck executing. KMD -should keep track of resets, because userspace can query any time about the -reset status for a specific context. This is needed to propagate to the rest of -the stack that a reset has happened. Currently, this is implemented by each -driver separately, with no common DRM interface. Ideally this should be properly -integrated at DRM scheduler to provide a common ground for all drivers. After a -reset, KMD should reject new command submissions for affected contexts. +it as needed. Usually a hang is detected when a job gets stuck executing. + +Propagation of errors to userspace has proven to be tricky since it goes in +the opposite direction of the usual flow of commands. Because of this, vendor +independent error handling was added to the &dma_fence object; this way drivers +can add an error code to their fences before signaling them. See the function +dma_fence_set_error() for how to do this and for examples of error codes to use. + +The DRM scheduler also allows setting error codes on all pending fences when +hardware submissions are restarted after a reset. Error codes are also +forwarded from the hardware fence to the scheduler fence to bubble up errors +to the higher levels of the stack and eventually userspace. + +Fence errors can be queried by userspace through the generic SYNC_IOC_FILE_INFO +IOCTL as well as through driver-specific interfaces. + +In addition to setting fence errors, drivers should also keep track of resets per +context; the DRM scheduler provides the drm_sched_entity_error() function as a +helper for this use case. After a reset, KMD should reject new command +submissions for affected contexts. User Mode Driver ---------------- diff --git a/Documentation/gpu/komeda-kms.rst b/Documentation/gpu/komeda-kms.rst index 633a016563ae..eaea40eb725b 100644 --- a/Documentation/gpu/komeda-kms.rst +++ b/Documentation/gpu/komeda-kms.rst @@ -86,7 +86,7 @@ types of working mode: - Single display mode Two pipelines work together to drive only one display output. - On this mode, pipeline_B doesn't work indenpendently, but outputs its + On this mode, pipeline_B doesn't work independently, but outputs its composition result into pipeline_A, and its pixel timing also derived from pipeline_A.timing_ctrlr.
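[Editor's note] As a side note to the drm-uapi.rst paragraphs above, here is a minimal, hypothetical sketch (not part of this series) of the flow they describe: a driver sets an error on a hardware fence before signaling it, and later rejects submissions for an entity whose last scheduled fence carries an error. The function names and the -ETIME/-ECANCELED codes are illustrative assumptions; only dma_fence_set_error(), dma_fence_signal() and drm_sched_entity_error() come from the text above.

/*
 * Hypothetical sketch only -- not taken from this patch set.
 * Illustrates dma_fence_set_error() + drm_sched_entity_error()
 * as described in the drm-uapi.rst text above.
 */
#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

static void example_fail_pending_fence(struct dma_fence *hw_fence)
{
	/* The error must be set before the fence is signaled. */
	dma_fence_set_error(hw_fence, -ETIME);
	dma_fence_signal(hw_fence);
}

static int example_check_context(struct drm_sched_entity *entity)
{
	/* Reject new submissions for a context that has already seen a reset. */
	if (drm_sched_entity_error(entity))
		return -ECANCELED;

	return 0;
}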
The pipeline_B works just like a "slave" of pipeline_A(master) diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 2b281e3c75a4..256d0d1cb216 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -834,6 +834,22 @@ Contact: Javier Martinez Canillas <javierm@redhat.com> Level: Advanced +Querying errors from drm_syncobj +================================ + +The drm_syncobj container can be used by driver independent code to signal +complection of submission. + +One minor feature still missing is a generic DRM IOCTL to query the error +status of binary and timeline drm_syncobj. + +This should probably be improved by implementing the necessary kernel interface +and adding support for that in the userspace stack. + +Contact: Christian König + +Level: Starter + Outside DRM =========== diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c index c384004b918e..39c1e533a2f2 100644 --- a/drivers/dma-buf/heaps/cma_heap.c +++ b/drivers/dma-buf/heaps/cma_heap.c @@ -366,7 +366,7 @@ static const struct dma_heap_ops cma_heap_ops = { .allocate = cma_heap_allocate, }; -static int __add_cma_heap(struct cma *cma, void *data) +static int __init __add_cma_heap(struct cma *cma, void *data) { struct cma_heap *cma_heap; struct dma_heap_export_info exp_info; @@ -391,7 +391,7 @@ static int __add_cma_heap(struct cma *cma, void *data) return 0; } -static int add_default_cma_heap(void) +static int __init add_default_cma_heap(void) { struct cma *default_cma = dev_get_cma_area(NULL); int ret = 0; diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c index d78cdb9d01e5..26d5dc89ea16 100644 --- a/drivers/dma-buf/heaps/system_heap.c +++ b/drivers/dma-buf/heaps/system_heap.c @@ -421,7 +421,7 @@ static const struct dma_heap_ops system_heap_ops = { .allocate = system_heap_allocate, }; -static int system_heap_create(void) +static int __init system_heap_create(void) { struct dma_heap_export_info exp_info; diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig index 0051fb1b437f..fc3237da8090 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -6,6 +6,7 @@ config DRM_AMDGPU depends on !UML select FW_LOADER select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HDCP_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index 73b2b401b450..30c5172d208e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -300,7 +300,7 @@ static int suspend_resume_compute_scheduler(struct amdgpu_device *adev, bool sus if (r) goto out; } else { - drm_sched_start(&ring->sched); + drm_sched_start(&ring->sched, 0); } } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f4628412dac4..e479ff33a0a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5907,7 +5907,7 @@ skip_hw_reset: if (!amdgpu_ring_sched_ready(ring)) continue; - drm_sched_start(&ring->sched); + drm_sched_start(&ring->sched, 0); } if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) @@ -6414,7 +6414,7 @@ void amdgpu_pci_resume(struct pci_dev *pdev) if (!amdgpu_ring_sched_ready(ring)) continue; - drm_sched_start(&ring->sched); + drm_sched_start(&ring->sched, 0); } 
amdgpu_device_unset_mp1_state(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index ad6bf5d4e0a9..570820edcfda 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -146,7 +146,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) atomic_inc(&ring->adev->gpu_reset_counter); amdgpu_fence_driver_force_completion(ring); if (amdgpu_ring_sched_ready(ring)) - drm_sched_start(&ring->sched); + drm_sched_start(&ring->sched, 0); goto exit; } } diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 3eb955333c80..683cb33805b2 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -368,6 +368,13 @@ config DRM_TI_DLPC3433 It supports up to 720p resolution with 60 and 120 Hz refresh rates. +config DRM_TI_TDP158 + tristate "TI TDP158 HDMI/TMDS bridge" + depends on OF + select DRM_PANEL_BRIDGE + help + Texas Instruments TDP158 HDMI/TMDS Bridge driver + config DRM_TI_TFP410 tristate "TI TFP410 DVI/HDMI bridge" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index 7df87b582dca..3daf803ce80b 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/ obj-$(CONFIG_DRM_TI_DLPC3433) += ti-dlpc3433.o obj-$(CONFIG_DRM_TI_SN65DSI83) += ti-sn65dsi83.o obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o +obj-$(CONFIG_DRM_TI_TDP158) += ti-tdp158.o obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o diff --git a/drivers/gpu/drm/bridge/imx/Kconfig b/drivers/gpu/drm/bridge/imx/Kconfig index 8dd89efa8ea7..9a480c6abb85 100644 --- a/drivers/gpu/drm/bridge/imx/Kconfig +++ b/drivers/gpu/drm/bridge/imx/Kconfig @@ -3,6 +3,16 @@ if ARCH_MXC || COMPILE_TEST config DRM_IMX_LDB_HELPER tristate +config DRM_IMX_LEGACY_BRIDGE + tristate + depends on DRM_IMX + help + This is a DRM bridge implementation for the DRM i.MX IPUv3 driver, + that uses of_get_drm_display_mode to acquire display mode. + + Newer designs should not use this bridge and should use proper panel + driver instead. 
+ config DRM_IMX8MP_DW_HDMI_BRIDGE tristate "Freescale i.MX8MP HDMI-TX bridge support" depends on OF diff --git a/drivers/gpu/drm/bridge/imx/Makefile b/drivers/gpu/drm/bridge/imx/Makefile index edb0a7b71b30..dd5d48584806 100644 --- a/drivers/gpu/drm/bridge/imx/Makefile +++ b/drivers/gpu/drm/bridge/imx/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_DRM_IMX_LDB_HELPER) += imx-ldb-helper.o +obj-$(CONFIG_DRM_IMX_LEGACY_BRIDGE) += imx-legacy-bridge.o obj-$(CONFIG_DRM_IMX8MP_DW_HDMI_BRIDGE) += imx8mp-hdmi-tx.o obj-$(CONFIG_DRM_IMX8MP_HDMI_PVI) += imx8mp-hdmi-pvi.o obj-$(CONFIG_DRM_IMX8QM_LDB) += imx8qm-ldb.o diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c new file mode 100644 index 000000000000..07a78a02a884 --- /dev/null +++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Freescale i.MX drm driver + * + * bridge driver for legacy DT bindings, utilizing display-timings node + */ + +#include <drm/drm_bridge.h> +#include <drm/drm_modes.h> +#include <drm/drm_probe_helper.h> +#include <drm/bridge/imx.h> + +#include <video/of_display_timing.h> +#include <video/of_videomode.h> + +struct imx_legacy_bridge { + struct drm_bridge base; + + struct drm_display_mode mode; + u32 bus_flags; +}; + +#define to_imx_legacy_bridge(bridge) container_of(bridge, struct imx_legacy_bridge, base) + +static int imx_legacy_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) + return -EINVAL; + + return 0; +} + +static int imx_legacy_bridge_get_modes(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct imx_legacy_bridge *imx_bridge = to_imx_legacy_bridge(bridge); + int ret; + + ret = drm_connector_helper_get_modes_fixed(connector, &imx_bridge->mode); + if (ret) + return ret; + + connector->display_info.bus_flags = imx_bridge->bus_flags; + + return 0; +} + +struct drm_bridge_funcs imx_legacy_bridge_funcs = { + .attach = imx_legacy_bridge_attach, + .get_modes = imx_legacy_bridge_get_modes, +}; + +struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev, + struct device_node *np, + int type) +{ + struct imx_legacy_bridge *imx_bridge; + int ret; + + imx_bridge = devm_kzalloc(dev, sizeof(*imx_bridge), GFP_KERNEL); + if (!imx_bridge) + return ERR_PTR(-ENOMEM); + + ret = of_get_drm_display_mode(np, + &imx_bridge->mode, + &imx_bridge->bus_flags, + OF_USE_NATIVE_MODE); + if (ret) + return ERR_PTR(ret); + + imx_bridge->mode.type |= DRM_MODE_TYPE_DRIVER; + + imx_bridge->base.funcs = &imx_legacy_bridge_funcs; + imx_bridge->base.of_node = np; + imx_bridge->base.ops = DRM_BRIDGE_OP_MODES; + imx_bridge->base.type = type; + + ret = devm_drm_bridge_add(dev, &imx_bridge->base); + if (ret) + return ERR_PTR(ret); + + return &imx_bridge->base; +} +EXPORT_SYMBOL_GPL(devm_imx_drm_legacy_bridge); + +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c new file mode 100644 index 000000000000..3472ed5924e8 --- /dev/null +++ b/drivers/gpu/drm/bridge/ti-tdp158.c @@ -0,0 +1,111 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2024 Freebox SAS + */ + +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> + +struct tdp158 { + struct drm_bridge bridge; + struct drm_bridge *next; + struct gpio_desc *enable; // Operation Enable - pin 36 + struct regulator *vcc; // 3.3V + struct regulator *vdd; // 1.1V 
+ struct device *dev; +}; + +static void tdp158_enable(struct drm_bridge *bridge, struct drm_bridge_state *prev) +{ + int err; + struct tdp158 *tdp158 = bridge->driver_private; + + err = regulator_enable(tdp158->vcc); + if (err) + dev_err(tdp158->dev, "failed to enable vcc: %d", err); + + err = regulator_enable(tdp158->vdd); + if (err) + dev_err(tdp158->dev, "failed to enable vdd: %d", err); + + gpiod_set_value_cansleep(tdp158->enable, 1); +} + +static void tdp158_disable(struct drm_bridge *bridge, struct drm_bridge_state *prev) +{ + struct tdp158 *tdp158 = bridge->driver_private; + + gpiod_set_value_cansleep(tdp158->enable, 0); + regulator_disable(tdp158->vdd); + regulator_disable(tdp158->vcc); +} + +static int tdp158_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) +{ + struct tdp158 *tdp158 = bridge->driver_private; + + return drm_bridge_attach(bridge->encoder, tdp158->next, bridge, flags); +} + +static const struct drm_bridge_funcs tdp158_bridge_funcs = { + .attach = tdp158_attach, + .atomic_enable = tdp158_enable, + .atomic_disable = tdp158_disable, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, +}; + +static int tdp158_probe(struct i2c_client *client) +{ + struct tdp158 *tdp158; + struct device *dev = &client->dev; + + tdp158 = devm_kzalloc(dev, sizeof(*tdp158), GFP_KERNEL); + if (!tdp158) + return -ENOMEM; + + tdp158->next = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0); + if (IS_ERR(tdp158->next)) + return dev_err_probe(dev, PTR_ERR(tdp158->next), "missing bridge"); + + tdp158->vcc = devm_regulator_get(dev, "vcc"); + if (IS_ERR(tdp158->vcc)) + return dev_err_probe(dev, PTR_ERR(tdp158->vcc), "vcc"); + + tdp158->vdd = devm_regulator_get(dev, "vdd"); + if (IS_ERR(tdp158->vdd)) + return dev_err_probe(dev, PTR_ERR(tdp158->vdd), "vdd"); + + tdp158->enable = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); + if (IS_ERR(tdp158->enable)) + return dev_err_probe(dev, PTR_ERR(tdp158->enable), "enable"); + + tdp158->bridge.of_node = dev->of_node; + tdp158->bridge.funcs = &tdp158_bridge_funcs; + tdp158->bridge.driver_private = tdp158; + tdp158->dev = dev; + + return devm_drm_bridge_add(dev, &tdp158->bridge); +} + +static const struct of_device_id tdp158_match_table[] = { + { .compatible = "ti,tdp158" }, + { } +}; +MODULE_DEVICE_TABLE(of, tdp158_match_table); + +static struct i2c_driver tdp158_driver = { + .probe = tdp158_probe, + .driver = { + .name = "tdp158", + .of_match_table = tdp158_match_table, + }, +}; +module_i2c_driver(tdp158_driver); + +MODULE_DESCRIPTION("TI TDP158 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig index 3b824e01c9b5..0f07cf1483ff 100644 --- a/drivers/gpu/drm/display/Kconfig +++ b/drivers/gpu/drm/display/Kconfig @@ -64,6 +64,12 @@ config DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG If in doubt, say "N". +config DRM_DISPLAY_DSC_HELPER + bool + depends on DRM_DISPLAY_HELPER + help + DRM display helpers for VESA DSC (used by DSI and DisplayPort). 
+ config DRM_DISPLAY_HDCP_HELPER bool help diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile index fbb9d2b8acd4..629c834c3192 100644 --- a/drivers/gpu/drm/display/Makefile +++ b/drivers/gpu/drm/display/Makefile @@ -8,10 +8,11 @@ drm_display_helper-$(CONFIG_DRM_BRIDGE_CONNECTOR) += \ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \ drm_dp_dual_mode_helper.o \ drm_dp_helper.o \ - drm_dp_mst_topology.o \ - drm_dsc_helper.o + drm_dp_mst_topology.o drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \ drm_dp_tunnel.o +drm_display_helper-$(CONFIG_DRM_DISPLAY_DSC_HELPER) += \ + drm_dsc_helper.o drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \ drm_hdmi_helper.o \ diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 6027584406af..22b1fe9c03b8 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -16,7 +16,6 @@ #include <drm/drm_mode.h> #include <drm/drm_plane.h> #include <drm/drm_prime.h> -#include <drm/drm_simple_kms_helper.h> #include <drm/ttm/ttm_range_manager.h> #include <drm/ttm/ttm_tt.h> @@ -687,50 +686,6 @@ drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane, EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb); /* - * Helpers for struct drm_simple_display_pipe_funcs - */ - -/** - * drm_gem_vram_simple_display_pipe_prepare_fb() - Implements &struct - * drm_simple_display_pipe_funcs.prepare_fb - * @pipe: a simple display pipe - * @new_state: the plane's new state - * - * During plane updates, this function pins the GEM VRAM - * objects of the plane's new framebuffer to VRAM. Call - * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them. - * - * Returns: - * 0 on success, or - * a negative errno code otherwise. - */ -int drm_gem_vram_simple_display_pipe_prepare_fb( - struct drm_simple_display_pipe *pipe, - struct drm_plane_state *new_state) -{ - return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state); -} -EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb); - -/** - * drm_gem_vram_simple_display_pipe_cleanup_fb() - Implements &struct - * drm_simple_display_pipe_funcs.cleanup_fb - * @pipe: a simple display pipe - * @old_state: the plane's old state - * - * During plane updates, this function unpins the GEM VRAM - * objects of the plane's old framebuffer from VRAM. Complements - * drm_gem_vram_simple_display_pipe_prepare_fb(). 
- */ -void drm_gem_vram_simple_display_pipe_cleanup_fb( - struct drm_simple_display_pipe *pipe, - struct drm_plane_state *old_state) -{ - drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state); -} -EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb); - -/* * PRIME helpers */ diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 5ace481c1901..ca254611b382 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -151,7 +151,7 @@ static void show_leaks(struct drm_mm *mm) { } INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, u64, __subtree_last, - START, LAST, static inline, drm_mm_interval_tree) + START, LAST, static inline __maybe_unused, drm_mm_interval_tree) struct drm_mm_node * __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) @@ -611,7 +611,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm, } EXPORT_SYMBOL(drm_mm_insert_node_in_range); -static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node) +static inline __maybe_unused bool drm_mm_node_scanned_block(const struct drm_mm_node *node) { return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags); } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index ab9ca4824b62..23ced5896c7c 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -72,7 +72,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job drm_sched_resubmit_jobs(&gpu->sched); - drm_sched_start(&gpu->sched); + drm_sched_start(&gpu->sched, 0); return DRM_GPU_SCHED_STAT_NOMINAL; out_no_timeout: diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 1e26cd4f8347..c9d4b9146df9 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -883,27 +883,32 @@ static const struct drm_connector_funcs hdmi_connector_funcs = { static int hdmi_get_modes(struct drm_connector *connector) { struct hdmi_context *hdata = connector_to_hdmi(connector); - struct edid *edid; + const struct drm_display_info *info = &connector->display_info; + const struct drm_edid *drm_edid; int ret; if (!hdata->ddc_adpt) goto no_edid; - edid = drm_get_edid(connector, hdata->ddc_adpt); - if (!edid) + drm_edid = drm_edid_read_ddc(connector, hdata->ddc_adpt); + + ret = drm_edid_connector_update(connector, drm_edid); + if (ret) + return 0; + + cec_notifier_set_phys_addr(hdata->notifier, info->source_physical_address); + + if (!drm_edid) goto no_edid; - hdata->dvi_mode = !connector->display_info.is_hdmi; + hdata->dvi_mode = !info->is_hdmi; DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n", (hdata->dvi_mode ? 
"dvi monitor" : "hdmi monitor"), - edid->width_cm, edid->height_cm); - - drm_connector_update_edid_property(connector, edid); - cec_notifier_set_phys_addr_from_edid(hdata->notifier, edid); + info->width_mm / 10, info->height_mm / 10); - ret = drm_add_edid_modes(connector, edid); + ret = drm_edid_connector_add_modes(connector); - kfree(edid); + drm_edid_free(drm_edid); return ret; diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index faa253b27664..db400aad88fa 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -11,6 +11,7 @@ config DRM_I915 select SHMEM select TMPFS select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HDCP_HELPER select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/drm/imagination/pvr_ccb.c b/drivers/gpu/drm/imagination/pvr_ccb.c index 4deeac7ed40a..2bbdc05a3b97 100644 --- a/drivers/gpu/drm/imagination/pvr_ccb.c +++ b/drivers/gpu/drm/imagination/pvr_ccb.c @@ -321,7 +321,7 @@ static int pvr_kccb_reserve_slot_sync(struct pvr_device *pvr_dev) bool reserved = false; u32 retries = 0; - while ((jiffies - start_timestamp) < (u32)RESERVE_SLOT_TIMEOUT || + while (time_before(jiffies, start_timestamp + RESERVE_SLOT_TIMEOUT) || retries < RESERVE_SLOT_MIN_RETRIES) { reserved = pvr_kccb_try_reserve_slot(pvr_dev); if (reserved) diff --git a/drivers/gpu/drm/imagination/pvr_context.c b/drivers/gpu/drm/imagination/pvr_context.c index eded5e955cc0..98327f9bbd9c 100644 --- a/drivers/gpu/drm/imagination/pvr_context.c +++ b/drivers/gpu/drm/imagination/pvr_context.c @@ -69,24 +69,12 @@ process_static_context_state(struct pvr_device *pvr_dev, const struct pvr_stream void *stream; int err; - stream = kzalloc(stream_size, GFP_KERNEL); - if (!stream) - return -ENOMEM; - - if (copy_from_user(stream, u64_to_user_ptr(stream_user_ptr), stream_size)) { - err = -EFAULT; - goto err_free; - } + stream = memdup_user(u64_to_user_ptr(stream_user_ptr), stream_size); + if (IS_ERR(stream)) + return PTR_ERR(stream); err = pvr_stream_process(pvr_dev, cmd_defs, stream, stream_size, dest); - if (err) - goto err_free; - - kfree(stream); - - return 0; -err_free: kfree(stream); return err; diff --git a/drivers/gpu/drm/imagination/pvr_job.c b/drivers/gpu/drm/imagination/pvr_job.c index 78c2f3c6dce0..618503a212a7 100644 --- a/drivers/gpu/drm/imagination/pvr_job.c +++ b/drivers/gpu/drm/imagination/pvr_job.c @@ -90,20 +90,13 @@ static int pvr_fw_cmd_init(struct pvr_device *pvr_dev, struct pvr_job *job, void *stream; int err; - stream = kzalloc(stream_len, GFP_KERNEL); - if (!stream) - return -ENOMEM; - - if (copy_from_user(stream, u64_to_user_ptr(stream_userptr), stream_len)) { - err = -EFAULT; - goto err_free_stream; - } + stream = memdup_user(u64_to_user_ptr(stream_userptr), stream_len); + if (IS_ERR(stream)) + return PTR_ERR(stream); err = pvr_job_process_stream(pvr_dev, stream_def, stream, stream_len, job); -err_free_stream: kfree(stream); - return err; } diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c index 20cb46012082..c4f08432882b 100644 --- a/drivers/gpu/drm/imagination/pvr_queue.c +++ b/drivers/gpu/drm/imagination/pvr_queue.c @@ -782,7 +782,7 @@ static void pvr_queue_start(struct pvr_queue *queue) } } - drm_sched_start(&queue->scheduler); + drm_sched_start(&queue->scheduler, 0); } /** @@ -842,7 +842,7 @@ pvr_queue_timedout_job(struct drm_sched_job *s_job) } mutex_unlock(&pvr_dev->queues.lock); - drm_sched_start(sched); + drm_sched_start(sched, 0); return 
DRM_GPU_SCHED_STAT_NOMINAL; } diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c index 97c0f772ed65..39773991710a 100644 --- a/drivers/gpu/drm/imagination/pvr_vm.c +++ b/drivers/gpu/drm/imagination/pvr_vm.c @@ -640,9 +640,7 @@ pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle) xa_lock(&pvr_file->vm_ctx_handles); vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle); - if (vm_ctx) - kref_get(&vm_ctx->ref_count); - + pvr_vm_context_get(vm_ctx); xa_unlock(&pvr_file->vm_ctx_handles); return vm_ctx; diff --git a/drivers/gpu/drm/imx/ipuv3/Kconfig b/drivers/gpu/drm/imx/ipuv3/Kconfig index bacf0655ebaf..5a40c878ebb0 100644 --- a/drivers/gpu/drm/imx/ipuv3/Kconfig +++ b/drivers/gpu/drm/imx/ipuv3/Kconfig @@ -11,8 +11,9 @@ config DRM_IMX config DRM_IMX_PARALLEL_DISPLAY tristate "Support for parallel displays" - select DRM_PANEL depends on DRM_IMX + select DRM_BRIDGE + select DRM_PANEL_BRIDGE select VIDEOMODE_HELPERS config DRM_IMX_TVE @@ -26,9 +27,12 @@ config DRM_IMX_TVE config DRM_IMX_LDB tristate "Support for LVDS displays" - depends on DRM_IMX && MFD_SYSCON + depends on DRM_IMX depends on COMMON_CLK - select DRM_PANEL + select MFD_SYSCON + select DRM_BRIDGE + select DRM_PANEL_BRIDGE + select DRM_IMX_LEGACY_BRIDGE help Choose this to enable the internal LVDS Display Bridge (LDB) found on i.MX53 and i.MX6 processors. diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c index 4cfabcf7375a..189d395349b8 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c @@ -34,13 +34,6 @@ module_param(legacyfb_depth, int, 0444); DEFINE_DRM_GEM_DMA_FOPS(imx_drm_driver_fops); -void imx_drm_connector_destroy(struct drm_connector *connector) -{ - drm_connector_unregister(connector); - drm_connector_cleanup(connector); -} -EXPORT_SYMBOL_GPL(imx_drm_connector_destroy); - static int imx_drm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm.h b/drivers/gpu/drm/imx/ipuv3/imx-drm.h index e721bebda2bd..0c85bf83ffbf 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-drm.h +++ b/drivers/gpu/drm/imx/ipuv3/imx-drm.h @@ -3,14 +3,9 @@ #define _IMX_DRM_H_ struct device_node; -struct drm_crtc; struct drm_connector; struct drm_device; -struct drm_display_mode; struct drm_encoder; -struct drm_framebuffer; -struct drm_plane; -struct platform_device; struct imx_crtc_state { struct drm_crtc_state base; @@ -24,21 +19,12 @@ static inline struct imx_crtc_state *to_imx_crtc_state(struct drm_crtc_state *s) { return container_of(s, struct imx_crtc_state, base); } -int imx_drm_init_drm(struct platform_device *pdev, - int preferred_bpp); -int imx_drm_exit_drm(void); extern struct platform_driver ipu_drm_driver; -void imx_drm_mode_config_init(struct drm_device *drm); - -struct drm_gem_dma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); - int imx_drm_encoder_parse_of(struct drm_device *drm, struct drm_encoder *encoder, struct device_node *np); -void imx_drm_connector_destroy(struct drm_connector *connector); - int ipu_planes_assign_pre(struct drm_device *dev, struct drm_atomic_state *state); diff --git a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c index 793dfb1a3ed0..ff74018ac5cd 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-ldb.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-ldb.c @@ -19,19 +19,16 @@ #include <linux/regmap.h> #include <linux/videodev2.h> -#include <video/of_display_timing.h> -#include <video/of_videomode.h> - 
#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> -#include <drm/drm_edid.h> +#include <drm/drm_bridge_connector.h> #include <drm/drm_managed.h> #include <drm/drm_of.h> -#include <drm/drm_panel.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> +#include <drm/bridge/imx.h> #include "imx-drm.h" @@ -55,7 +52,6 @@ struct imx_ldb_channel; struct imx_ldb_encoder { - struct drm_connector connector; struct drm_encoder encoder; struct imx_ldb_channel *channel; }; @@ -65,25 +61,13 @@ struct imx_ldb; struct imx_ldb_channel { struct imx_ldb *ldb; - /* Defines what is connected to the ldb, only one at a time */ - struct drm_panel *panel; struct drm_bridge *bridge; struct device_node *child; - struct i2c_adapter *ddc; int chno; - const struct drm_edid *drm_edid; - struct drm_display_mode mode; - int mode_valid; u32 bus_format; - u32 bus_flags; }; -static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c) -{ - return container_of(c, struct imx_ldb_encoder, connector)->channel; -} - static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e) { return container_of(e, struct imx_ldb_encoder, encoder)->channel; @@ -133,38 +117,6 @@ static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch, } } -static int imx_ldb_connector_get_modes(struct drm_connector *connector) -{ - struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector); - int num_modes; - - num_modes = drm_panel_get_modes(imx_ldb_ch->panel, connector); - if (num_modes > 0) - return num_modes; - - if (!imx_ldb_ch->drm_edid && imx_ldb_ch->ddc) { - imx_ldb_ch->drm_edid = drm_edid_read_ddc(connector, - imx_ldb_ch->ddc); - drm_edid_connector_update(connector, imx_ldb_ch->drm_edid); - } - - if (imx_ldb_ch->drm_edid) - num_modes = drm_edid_connector_add_modes(connector); - - if (imx_ldb_ch->mode_valid) { - struct drm_display_mode *mode; - - mode = drm_mode_duplicate(connector->dev, &imx_ldb_ch->mode); - if (!mode) - return -EINVAL; - mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_probed_add(connector, mode); - num_modes++; - } - - return num_modes; -} - static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno, unsigned long serial_clk, unsigned long di_clk) { @@ -205,8 +157,6 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder) return; } - drm_panel_prepare(imx_ldb_ch->panel); - if (dual) { clk_set_parent(ldb->clk_sel[mux], ldb->clk[0]); clk_set_parent(ldb->clk_sel[mux], ldb->clk[1]); @@ -245,8 +195,6 @@ static void imx_ldb_encoder_enable(struct drm_encoder *encoder) } regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl); - - drm_panel_enable(imx_ldb_ch->panel); } static void @@ -323,8 +271,6 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN; int mux, ret; - drm_panel_disable(imx_ldb_ch->panel); - if (imx_ldb_ch == &ldb->channel[0] || dual) ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK; if (imx_ldb_ch == &ldb->channel[1] || dual) @@ -358,8 +304,6 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder) dev_err(ldb->dev, "unable to set di%d parent clock to original parent\n", mux); - - drm_panel_unprepare(imx_ldb_ch->panel); } static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder, @@ -374,11 +318,12 @@ static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder, /* Bus format description in DT overrides connector display info. 
*/ if (!bus_format && di->num_bus_formats) { bus_format = di->bus_formats[0]; - imx_crtc_state->bus_flags = di->bus_flags; } else { bus_format = imx_ldb_ch->bus_format; - imx_crtc_state->bus_flags = imx_ldb_ch->bus_flags; } + + imx_crtc_state->bus_flags = di->bus_flags; + switch (bus_format) { case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG: imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18; @@ -398,18 +343,6 @@ static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder, } -static const struct drm_connector_funcs imx_ldb_connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = imx_drm_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = { - .get_modes = imx_ldb_connector_get_modes, -}; - static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = { .atomic_mode_set = imx_ldb_encoder_atomic_mode_set, .enable = imx_ldb_encoder_enable, @@ -447,7 +380,6 @@ static int imx_ldb_register(struct drm_device *drm, return PTR_ERR(ldb_encoder); ldb_encoder->channel = imx_ldb_ch; - connector = &ldb_encoder->connector; encoder = &ldb_encoder->encoder; ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child); @@ -466,25 +398,16 @@ static int imx_ldb_register(struct drm_device *drm, drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs); - if (imx_ldb_ch->bridge) { - ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL, 0); - if (ret) - return ret; - } else { - /* - * We want to add the connector whenever there is no bridge - * that brings its own, not only when there is a panel. For - * historical reasons, the ldb driver can also work without - * a panel. 
- */ - drm_connector_helper_add(connector, - &imx_ldb_connector_helper_funcs); - drm_connector_init_with_ddc(drm, connector, - &imx_ldb_connector_funcs, - DRM_MODE_CONNECTOR_LVDS, - imx_ldb_ch->ddc); - drm_connector_attach_encoder(connector, encoder); - } + ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) + return ret; + + connector = drm_bridge_connector_init(drm, encoder); + if (IS_ERR(connector)) + return PTR_ERR(connector); + + drm_connector_attach_encoder(connector, encoder); return 0; } @@ -549,47 +472,6 @@ static const struct of_device_id imx_ldb_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, imx_ldb_dt_ids); -static int imx_ldb_panel_ddc(struct device *dev, - struct imx_ldb_channel *channel, struct device_node *child) -{ - struct device_node *ddc_node; - int ret; - - ddc_node = of_parse_phandle(child, "ddc-i2c-bus", 0); - if (ddc_node) { - channel->ddc = of_find_i2c_adapter_by_node(ddc_node); - of_node_put(ddc_node); - if (!channel->ddc) { - dev_warn(dev, "failed to get ddc i2c adapter\n"); - return -EPROBE_DEFER; - } - } - - if (!channel->ddc) { - const void *edidp; - int edid_len; - - /* if no DDC available, fallback to hardcoded EDID */ - dev_dbg(dev, "no ddc available\n"); - - edidp = of_get_property(child, "edid", &edid_len); - if (edidp) { - channel->drm_edid = drm_edid_alloc(edidp, edid_len); - if (!channel->drm_edid) - return -ENOMEM; - } else if (!channel->panel) { - /* fallback to display-timings node */ - ret = of_get_drm_display_mode(child, - &channel->mode, - &channel->bus_flags, - OF_USE_NATIVE_MODE); - if (!ret) - channel->mode_valid = 1; - } - } - return 0; -} - static int imx_ldb_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = data; @@ -694,29 +576,22 @@ static int imx_ldb_probe(struct platform_device *pdev) * The output port is port@4 with an external 4-port mux or * port@2 with the internal 2-port mux. */ - ret = drm_of_find_panel_or_bridge(child, - imx_ldb->lvds_mux ? 4 : 2, 0, - &channel->panel, &channel->bridge); - if (ret && ret != -ENODEV) - goto free_child; - - /* panel ddc only if there is no bridge */ - if (!channel->bridge) { - ret = imx_ldb_panel_ddc(dev, channel, child); - if (ret) + channel->bridge = devm_drm_of_get_bridge(dev, child, + imx_ldb->lvds_mux ? 4 : 2, 0); + if (IS_ERR(channel->bridge)) { + ret = PTR_ERR(channel->bridge); + if (ret != -ENODEV) goto free_child; + channel->bridge = NULL; } bus_format = of_get_bus_format(dev, child); - if (bus_format == -EINVAL) { - /* - * If no bus format was specified in the device tree, - * we can still get it from the connected panel later. - */ - if (channel->panel && channel->panel->funcs && - channel->panel->funcs->get_modes) - bus_format = 0; - } + /* + * If no bus format was specified in the device tree, + * we can still get it from the connected panel later. + */ + if (bus_format == -EINVAL && channel->bridge) + bus_format = 0; if (bus_format < 0) { dev_err(dev, "could not determine data mapping: %d\n", bus_format); @@ -724,6 +599,20 @@ static int imx_ldb_probe(struct platform_device *pdev) goto free_child; } channel->bus_format = bus_format; + + /* + * legacy bridge doesn't handle bus_format, so create it after + * checking the bus_format property. 
+ */ + if (!channel->bridge) { + channel->bridge = devm_imx_drm_legacy_bridge(dev, child, + DRM_MODE_CONNECTOR_LVDS); + if (IS_ERR(channel->bridge)) { + ret = PTR_ERR(channel->bridge); + goto free_child; + } + } + channel->child = child; } @@ -738,16 +627,6 @@ free_child: static void imx_ldb_remove(struct platform_device *pdev) { - struct imx_ldb *imx_ldb = platform_get_drvdata(pdev); - int i; - - for (i = 0; i < 2; i++) { - struct imx_ldb_channel *channel = &imx_ldb->channel[i]; - - drm_edid_free(channel->drm_edid); - i2c_put_adapter(channel->ddc); - } - component_del(&pdev->dev, &imx_ldb_ops); } diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c index 29f494bfff67..d46d07d25f51 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-tve.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c @@ -305,9 +305,15 @@ static int imx_tve_atomic_check(struct drm_encoder *encoder, return 0; } +static void imx_tve_connector_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); +} + static const struct drm_connector_funcs imx_tve_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = imx_drm_connector_destroy, + .destroy = imx_tve_connector_destroy, .reset = drm_atomic_helper_connector_reset, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c index 91d7808a2d8d..70f62e89622e 100644 --- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c +++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c @@ -12,21 +12,18 @@ #include <linux/platform_device.h> #include <linux/videodev2.h> -#include <video/of_display_timing.h> - #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> -#include <drm/drm_edid.h> +#include <drm/drm_bridge_connector.h> #include <drm/drm_managed.h> #include <drm/drm_of.h> -#include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> +#include <drm/bridge/imx.h> #include "imx-drm.h" struct imx_parallel_display_encoder { - struct drm_connector connector; struct drm_encoder encoder; struct drm_bridge bridge; struct imx_parallel_display *pd; @@ -34,79 +31,15 @@ struct imx_parallel_display_encoder { struct imx_parallel_display { struct device *dev; - const struct drm_edid *drm_edid; u32 bus_format; - u32 bus_flags; - struct drm_display_mode mode; - struct drm_panel *panel; struct drm_bridge *next_bridge; }; -static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c) -{ - return container_of(c, struct imx_parallel_display_encoder, connector)->pd; -} - static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b) { return container_of(b, struct imx_parallel_display_encoder, bridge)->pd; } -static int imx_pd_connector_get_modes(struct drm_connector *connector) -{ - struct imx_parallel_display *imxpd = con_to_imxpd(connector); - struct device_node *np = imxpd->dev->of_node; - int num_modes; - - num_modes = drm_panel_get_modes(imxpd->panel, connector); - if (num_modes > 0) - return num_modes; - - if (imxpd->drm_edid) { - drm_edid_connector_update(connector, imxpd->drm_edid); - num_modes = drm_edid_connector_add_modes(connector); - } - - if (np) { - struct drm_display_mode *mode = drm_mode_create(connector->dev); - int ret; - - if (!mode) - return 0; - - ret = of_get_drm_display_mode(np, &imxpd->mode, - &imxpd->bus_flags, - 
OF_USE_NATIVE_MODE); - if (ret) { - drm_mode_destroy(connector->dev, mode); - return 0; - } - - drm_mode_copy(mode, &imxpd->mode); - mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_probed_add(connector, mode); - num_modes++; - } - - return num_modes; -} - -static void imx_pd_bridge_enable(struct drm_bridge *bridge) -{ - struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge); - - drm_panel_prepare(imxpd->panel); - drm_panel_enable(imxpd->panel); -} - -static void imx_pd_bridge_disable(struct drm_bridge *bridge) -{ - struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge); - - drm_panel_disable(imxpd->panel); - drm_panel_unprepare(imxpd->panel); -} - static const u32 imx_pd_bus_fmts[] = { MEDIA_BUS_FMT_RGB888_1X24, MEDIA_BUS_FMT_BGR888_1X24, @@ -200,7 +133,6 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, { struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state); struct drm_display_info *di = &conn_state->connector->display_info; - struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge); struct drm_bridge_state *next_bridge_state = NULL; struct drm_bridge *next_bridge; u32 bus_flags, bus_fmt; @@ -212,10 +144,8 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, if (next_bridge_state) bus_flags = next_bridge_state->input_bus_cfg.flags; - else if (di->num_bus_formats) - bus_flags = di->bus_flags; else - bus_flags = imxpd->bus_flags; + bus_flags = di->bus_flags; bus_fmt = bridge_state->input_bus_cfg.format; if (!imx_pd_format_supported(bus_fmt)) @@ -231,21 +161,16 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, return 0; } -static const struct drm_connector_funcs imx_pd_connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = imx_drm_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; +static int imx_pd_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge); -static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = { - .get_modes = imx_pd_connector_get_modes, -}; + return drm_bridge_attach(bridge->encoder, imxpd->next_bridge, bridge, flags); +} static const struct drm_bridge_funcs imx_pd_bridge_funcs = { - .enable = imx_pd_bridge_enable, - .disable = imx_pd_bridge_disable, + .attach = imx_pd_bridge_attach, .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, @@ -270,7 +195,6 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(imxpd_encoder); imxpd_encoder->pd = imxpd; - connector = &imxpd_encoder->connector; encoder = &imxpd_encoder->encoder; bridge = &imxpd_encoder->bridge; @@ -278,28 +202,14 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) if (ret) return ret; - /* set the connector's dpms to OFF so that - * drm_helper_connector_dpms() won't return - * immediately since the current state is ON - * at this point. 
- */ - connector->dpms = DRM_MODE_DPMS_OFF; - bridge->funcs = &imx_pd_bridge_funcs; - drm_bridge_attach(encoder, bridge, NULL, 0); - - if (imxpd->next_bridge) { - ret = drm_bridge_attach(encoder, imxpd->next_bridge, bridge, 0); - if (ret < 0) - return ret; - } else { - drm_connector_helper_add(connector, - &imx_pd_connector_helper_funcs); - drm_connector_init(drm, connector, &imx_pd_connector_funcs, - DRM_MODE_CONNECTOR_DPI); - - drm_connector_attach_encoder(connector, encoder); - } + drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); + + connector = drm_bridge_connector_init(drm, encoder); + if (IS_ERR(connector)) + return PTR_ERR(connector); + + drm_connector_attach_encoder(connector, encoder); return 0; } @@ -312,9 +222,7 @@ static int imx_pd_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - const u8 *edidp; struct imx_parallel_display *imxpd; - int edid_len; int ret; u32 bus_format = 0; const char *fmt; @@ -324,14 +232,13 @@ static int imx_pd_probe(struct platform_device *pdev) return -ENOMEM; /* port@1 is the output port */ - ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, - &imxpd->next_bridge); - if (ret && ret != -ENODEV) + imxpd->next_bridge = devm_drm_of_get_bridge(dev, np, 1, 0); + if (imxpd->next_bridge == ERR_PTR(-ENODEV)) + imxpd->next_bridge = devm_imx_drm_legacy_bridge(dev, np, DRM_MODE_CONNECTOR_DPI); + if (IS_ERR(imxpd->next_bridge)) { + ret = PTR_ERR(imxpd->next_bridge); return ret; - - edidp = of_get_property(np, "edid", &edid_len); - if (edidp) - imxpd->drm_edid = drm_edid_alloc(edidp, edid_len); + } ret = of_property_read_string(np, "interface-pix-fmt", &fmt); if (!ret) { @@ -355,11 +262,7 @@ static int imx_pd_probe(struct platform_device *pdev) static void imx_pd_remove(struct platform_device *pdev) { - struct imx_parallel_display *imxpd = platform_get_drvdata(pdev); - component_del(&pdev->dev, &imx_pd_ops); - - drm_edid_free(imxpd->drm_edid); } static const struct of_device_id imx_pd_dt_ids[] = { diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 1a944edb6ddc..b40c90e97d7e 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -463,7 +463,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job lima_pm_idle(ldev); drm_sched_resubmit_jobs(&pipe->base); - drm_sched_start(&pipe->base); + drm_sched_start(&pipe->base, 0); return DRM_GPU_SCHED_STAT_NOMINAL; } diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 90c68106b63b..c8dda0ebd043 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -92,6 +92,7 @@ config DRM_MSM_DPU bool "Enable DPU support in MSM DRM driver" depends on DRM_MSM select DRM_MSM_MDSS + select DRM_DISPLAY_DSC_HELPER default y help Compile in support for the Display Processing Unit in @@ -113,6 +114,7 @@ config DRM_MSM_DSI depends on DRM_MSM select DRM_PANEL select DRM_MIPI_DSI + select DRM_DISPLAY_DSC_HELPER default y help Choose this option if you have a need for MIPI DSI connector diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index b06aa473102b..8d5c9c74cbb9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -477,14 +477,14 @@ nouveau_connector_of_detect(struct drm_connector *connector) struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_encoder *nv_encoder; 
struct pci_dev *pdev = to_pci_dev(dev->dev); - struct device_node *cn, *dn = pci_device_to_OF_node(pdev); + struct device_node *dn = pci_device_to_OF_node(pdev); if (!dn || !((nv_encoder = find_encoder(connector, DCB_OUTPUT_TMDS)) || (nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG)))) return NULL; - for_each_child_of_node(dn, cn) { + for_each_child_of_node_scoped(dn, cn) { const char *name = of_get_property(cn, "name", NULL); const void *edid = of_get_property(cn, "EDID", NULL); int idx = name ? name[strlen(name) - 1] - 'A' : 0; @@ -492,7 +492,6 @@ nouveau_connector_of_detect(struct drm_connector *connector) if (nv_encoder->dcb->i2c_index == idx && edid) { nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL); - of_node_put(cn); return nv_encoder; } } diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index eb6c3f9a01f5..4412f2711fb5 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -379,7 +379,7 @@ nouveau_sched_timedout_job(struct drm_sched_job *sched_job) else NV_PRINTK(warn, job->cli, "Generic job timeout.\n"); - drm_sched_start(sched); + drm_sched_start(sched, 0); return stat; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index d1c294f00665..78a83f904bbd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -120,8 +120,8 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev) mutex_init(&tdev->iommu.mutex); if (device_iommu_mapped(dev)) { - tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type); - if (!tdev->iommu.domain) + tdev->iommu.domain = iommu_paging_domain_alloc(dev); + if (IS_ERR(tdev->iommu.domain)) goto error; /* diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c index a17a6dd8d3de..803b98df4858 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c @@ -142,7 +142,7 @@ nvkm_volt_map(struct nvkm_volt *volt, u8 id, u8 temp) return -ENODEV; } - result = min(max(result, (s64)info.min), (s64)info.max); + result = clamp(result, (s64)info.min, (s64)info.max); if (info.link != 0xff) { int ret = nvkm_volt_map(volt, info.link, temp); diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index d3a9a9fafe4e..1077fd2307d4 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -378,7 +378,7 @@ config DRM_PANEL_LG_SW43408 depends on OF depends on DRM_MIPI_DSI depends on BACKLIGHT_CLASS_DEVICE - select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HELPER help Say Y here if you want to enable support for LG sw43408 panel. 
@@ -587,7 +587,7 @@ config DRM_PANEL_RAYDIUM_RM692E5 depends on OF depends on DRM_MIPI_DSI depends on BACKLIGHT_CLASS_DEVICE - select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HELPER help Say Y here if you want to enable support for Raydium RM692E5-based @@ -946,7 +946,7 @@ config DRM_PANEL_VISIONOX_R66451 depends on OF depends on DRM_MIPI_DSI depends on BACKLIGHT_CLASS_DEVICE - select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HELPER help Say Y here if you want to enable support for Visionox diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c index 1fbc5d433d75..ff39f5dd4097 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9341.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9341.c @@ -13,9 +13,6 @@ * Derived from drivers/drm/gpu/panel/panel-ilitek-ili9322.c * the reuse of DBI abstraction part referred from Linus's patch * "drm/panel: s6e63m0: Switch to DBI abstraction for SPI" - * - * For only-dbi part, copy from David's code (drm/tiny/ili9341.c) - * Copyright 2018 David Lechner <david@lechnology.com> */ #include <linux/backlight.h> @@ -486,176 +483,6 @@ static const struct drm_panel_funcs ili9341_dpi_funcs = { .get_modes = ili9341_dpi_get_modes, }; -static void ili9341_dbi_enable(struct drm_simple_display_pipe *pipe, - struct drm_crtc_state *crtc_state, - struct drm_plane_state *plane_state) -{ - struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev); - struct mipi_dbi *dbi = &dbidev->dbi; - u8 addr_mode; - int ret, idx; - - if (!drm_dev_enter(pipe->crtc.dev, &idx)) - return; - - ret = mipi_dbi_poweron_conditional_reset(dbidev); - if (ret < 0) - goto out_exit; - if (ret == 1) - goto out_enable; - - mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF); - - mipi_dbi_command(dbi, ILI9341_POWERB, 0x00, 0xc1, 0x30); - mipi_dbi_command(dbi, ILI9341_POWER_SEQ, 0x64, 0x03, 0x12, 0x81); - mipi_dbi_command(dbi, ILI9341_DTCA, 0x85, 0x00, 0x78); - mipi_dbi_command(dbi, ILI9341_POWERA, 0x39, 0x2c, 0x00, 0x34, 0x02); - mipi_dbi_command(dbi, ILI9341_PRC, ILI9341_DBI_PRC_NORMAL); - mipi_dbi_command(dbi, ILI9341_DTCB, 0x00, 0x00); - - /* Power Control */ - mipi_dbi_command(dbi, ILI9341_POWER1, ILI9341_DBI_VCOMH_4P6V); - mipi_dbi_command(dbi, ILI9341_POWER2, ILI9341_DBI_PWR_2_DEFAULT); - /* VCOM */ - mipi_dbi_command(dbi, ILI9341_VCOM1, ILI9341_DBI_VCOM_1_VMH_4P25V, - ILI9341_DBI_VCOM_1_VML_1P5V); - mipi_dbi_command(dbi, ILI9341_VCOM2, ILI9341_DBI_VCOM_2_DEC_58); - - /* Memory Access Control */ - mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, - MIPI_DCS_PIXEL_FMT_16BIT); - - /* Frame Rate */ - mipi_dbi_command(dbi, ILI9341_FRC, ILI9341_DBI_FRC_DIVA & 0x03, - ILI9341_DBI_FRC_RTNA & 0x1f); - - /* Gamma */ - mipi_dbi_command(dbi, ILI9341_3GAMMA_EN, 0x00); - mipi_dbi_command(dbi, MIPI_DCS_SET_GAMMA_CURVE, ILI9341_GAMMA_CURVE_1); - mipi_dbi_command(dbi, ILI9341_PGAMMA, - 0x0f, 0x31, 0x2b, 0x0c, 0x0e, 0x08, 0x4e, 0xf1, - 0x37, 0x07, 0x10, 0x03, 0x0e, 0x09, 0x00); - mipi_dbi_command(dbi, ILI9341_NGAMMA, - 0x00, 0x0e, 0x14, 0x03, 0x11, 0x07, 0x31, 0xc1, - 0x48, 0x08, 0x0f, 0x0c, 0x31, 0x36, 0x0f); - - /* DDRAM */ - mipi_dbi_command(dbi, ILI9341_ETMOD, ILI9341_DBI_EMS_GAS | - ILI9341_DBI_EMS_DTS | - ILI9341_DBI_EMS_GON); - - /* Display */ - mipi_dbi_command(dbi, ILI9341_DFC, 0x08, 0x82, 0x27, 0x00); - mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE); - msleep(100); - - mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON); - msleep(100); - -out_enable: - switch (dbidev->rotation) { - default: - 
addr_mode = ILI9341_MADCTL_MX; - break; - case 90: - addr_mode = ILI9341_MADCTL_MV; - break; - case 180: - addr_mode = ILI9341_MADCTL_MY; - break; - case 270: - addr_mode = ILI9341_MADCTL_MV | ILI9341_MADCTL_MY | - ILI9341_MADCTL_MX; - break; - } - - addr_mode |= ILI9341_MADCTL_BGR; - mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode); - mipi_dbi_enable_flush(dbidev, crtc_state, plane_state); - drm_info(&dbidev->drm, "Initialized display serial interface\n"); -out_exit: - drm_dev_exit(idx); -} - -static const struct drm_simple_display_pipe_funcs ili9341_dbi_funcs = { - DRM_MIPI_DBI_SIMPLE_DISPLAY_PIPE_FUNCS(ili9341_dbi_enable), -}; - -static const struct drm_display_mode ili9341_dbi_mode = { - DRM_SIMPLE_MODE(240, 320, 37, 49), -}; - -DEFINE_DRM_GEM_DMA_FOPS(ili9341_dbi_fops); - -static struct drm_driver ili9341_dbi_driver = { - .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, - .fops = &ili9341_dbi_fops, - DRM_GEM_DMA_DRIVER_OPS_VMAP, - .debugfs_init = mipi_dbi_debugfs_init, - .name = "ili9341", - .desc = "Ilitek ILI9341", - .date = "20210716", - .major = 1, - .minor = 0, -}; - -static int ili9341_dbi_probe(struct spi_device *spi, struct gpio_desc *dc, - struct gpio_desc *reset) -{ - struct device *dev = &spi->dev; - struct mipi_dbi_dev *dbidev; - struct mipi_dbi *dbi; - struct drm_device *drm; - struct regulator *vcc; - u32 rotation = 0; - int ret; - - vcc = devm_regulator_get_optional(dev, "vcc"); - if (IS_ERR(vcc)) { - dev_err(dev, "get optional vcc failed\n"); - vcc = NULL; - } - - dbidev = devm_drm_dev_alloc(dev, &ili9341_dbi_driver, - struct mipi_dbi_dev, drm); - if (IS_ERR(dbidev)) - return PTR_ERR(dbidev); - - dbi = &dbidev->dbi; - drm = &dbidev->drm; - dbi->reset = reset; - dbidev->regulator = vcc; - - drm_mode_config_init(drm); - - dbidev->backlight = devm_of_find_backlight(dev); - if (IS_ERR(dbidev->backlight)) - return PTR_ERR(dbidev->backlight); - - device_property_read_u32(dev, "rotation", &rotation); - - ret = mipi_dbi_spi_init(spi, dbi, dc); - if (ret) - return ret; - - ret = mipi_dbi_dev_init(dbidev, &ili9341_dbi_funcs, - &ili9341_dbi_mode, rotation); - if (ret) - return ret; - - drm_mode_config_reset(drm); - - ret = drm_dev_register(drm, 0); - if (ret) - return ret; - - spi_set_drvdata(spi, drm); - - drm_fbdev_dma_setup(drm, 0); - - return 0; -} - static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc, struct gpio_desc *reset) { @@ -711,7 +538,6 @@ static int ili9341_probe(struct spi_device *spi) struct device *dev = &spi->dev; struct gpio_desc *dc; struct gpio_desc *reset; - const struct spi_device_id *id = spi_get_device_id(spi); reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(reset)) @@ -721,36 +547,15 @@ static int ili9341_probe(struct spi_device *spi) if (IS_ERR(dc)) return dev_err_probe(dev, PTR_ERR(dc), "Failed to get gpio 'dc'\n"); - if (!strcmp(id->name, "sf-tc240t-9370-t")) - return ili9341_dpi_probe(spi, dc, reset); - - if (!strcmp(id->name, "yx240qv29")) - return ili9341_dbi_probe(spi, dc, reset); - - return -ENODEV; + return ili9341_dpi_probe(spi, dc, reset); } static void ili9341_remove(struct spi_device *spi) { - const struct spi_device_id *id = spi_get_device_id(spi); struct ili9341 *ili = spi_get_drvdata(spi); - struct drm_device *drm = spi_get_drvdata(spi); - - if (!strcmp(id->name, "sf-tc240t-9370-t")) { - ili9341_dpi_power_off(ili); - drm_panel_remove(&ili->panel); - } else if (!strcmp(id->name, "yx240qv29")) { - drm_dev_unplug(drm); - drm_atomic_helper_shutdown(drm); - } -} -static 
void ili9341_shutdown(struct spi_device *spi) -{ - const struct spi_device_id *id = spi_get_device_id(spi); - - if (!strcmp(id->name, "yx240qv29")) - drm_atomic_helper_shutdown(spi_get_drvdata(spi)); + ili9341_dpi_power_off(ili); + drm_panel_remove(&ili->panel); } static const struct of_device_id ili9341_of_match[] = { @@ -758,19 +563,11 @@ static const struct of_device_id ili9341_of_match[] = { .compatible = "st,sf-tc240t-9370-t", .data = &ili9341_stm32f429_disco_data, }, - { - /* porting from tiny/ili9341.c - * for original mipi dbi compitable - */ - .compatible = "adafruit,yx240qv29", - .data = NULL, - }, { } }; MODULE_DEVICE_TABLE(of, ili9341_of_match); static const struct spi_device_id ili9341_id[] = { - { "yx240qv29", 0 }, { "sf-tc240t-9370-t", 0 }, { } }; @@ -779,7 +576,6 @@ MODULE_DEVICE_TABLE(spi, ili9341_id); static struct spi_driver ili9341_driver = { .probe = ili9341_probe, .remove = ili9341_remove, - .shutdown = ili9341_shutdown, .id_table = ili9341_id, .driver = { .name = "panel-ilitek-ili9341", diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c index 10bc8fb5f1f9..27a059b55ae5 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e3fa7.c @@ -38,57 +38,38 @@ static void s6e3fa7_panel_reset(struct s6e3fa7_panel *ctx) usleep_range(10000, 11000); } -static int s6e3fa7_panel_on(struct s6e3fa7_panel *ctx) +static int s6e3fa7_panel_on(struct mipi_dsi_device *dsi) { - struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode: %d\n", ret); - return ret; - } - msleep(120); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - if (ret < 0) { - dev_err(dev, "Failed to set tear on: %d\n", ret); - return ret; - } + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf4, + 0xbb, 0x23, 0x19, 0x3a, 0x9f, 0x0f, 0x09, 0xc0, + 0x00, 0xb4, 0x37, 0x70, 0x79, 0x69); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf0, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); - mipi_dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); - mipi_dsi_dcs_write_seq(dsi, 0xf4, - 0xbb, 0x23, 0x19, 0x3a, 0x9f, 0x0f, 0x09, 0xc0, - 0x00, 0xb4, 0x37, 0x70, 0x79, 0x69); - mipi_dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20); + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } - - return 0; + return dsi_ctx.accum_err; } static int s6e3fa7_panel_prepare(struct drm_panel *panel) { struct s6e3fa7_panel *ctx = to_s6e3fa7_panel(panel); - struct device *dev = &ctx->dsi->dev; int ret; s6e3fa7_panel_reset(ctx); - ret = s6e3fa7_panel_on(ctx); - if (ret < 0) { - dev_err(dev, "Failed to initialize panel: %d\n", ret); + ret = s6e3fa7_panel_on(ctx->dsi); + if (ret < 0) gpiod_set_value_cansleep(ctx->reset_gpio, 1); - return ret; - } - return 0; + return ret; } static int s6e3fa7_panel_unprepare(struct drm_panel *panel) @@ -104,23 +85,13 @@ static int s6e3fa7_panel_disable(struct drm_panel *panel) { struct 
s6e3fa7_panel *ctx = to_s6e3fa7_panel(panel); struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; - - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display off: %d\n", ret); - return ret; - } + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to enter sleep mode: %d\n", ret); - return ret; - } - msleep(120); + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); - return 0; + return dsi_ctx.accum_err; } static const struct drm_display_mode s6e3fa7_panel_mode = { diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 671eed4ad890..04d615df5259 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -3,6 +3,10 @@ /* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */ /* Copyright 2019 Collabora ltd. */ +#ifdef CONFIG_ARM_ARCH_TIMER +#include <asm/arch_timer.h> +#endif + #include <linux/module.h> #include <linux/of.h> #include <linux/pagemap.h> @@ -21,13 +25,33 @@ #include "panfrost_gpu.h" #include "panfrost_perfcnt.h" +#define JOB_REQUIREMENTS (PANFROST_JD_REQ_FS | PANFROST_JD_REQ_CYCLE_COUNT) + static bool unstable_ioctls; module_param_unsafe(unstable_ioctls, bool, 0600); +static int panfrost_ioctl_query_timestamp(struct panfrost_device *pfdev, + u64 *arg) +{ + int ret; + + ret = pm_runtime_resume_and_get(pfdev->dev); + if (ret) + return ret; + + panfrost_cycle_counter_get(pfdev); + *arg = panfrost_timestamp_read(pfdev); + panfrost_cycle_counter_put(pfdev); + + pm_runtime_put(pfdev->dev); + return 0; +} + static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file) { struct drm_panfrost_get_param *param = data; struct panfrost_device *pfdev = ddev->dev_private; + int ret; if (param->pad != 0) return -EINVAL; @@ -69,6 +93,21 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15); PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups); PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc); + + case DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP: + ret = panfrost_ioctl_query_timestamp(pfdev, &param->value); + if (ret) + return ret; + break; + + case DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY: +#ifdef CONFIG_ARM_ARCH_TIMER + param->value = arch_timer_get_cntfrq(); +#else + param->value = 0; +#endif + break; + default: return -EINVAL; } @@ -245,7 +284,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, if (!args->jc) return -EINVAL; - if (args->requirements && args->requirements != PANFROST_JD_REQ_FS) + if (args->requirements & ~JOB_REQUIREMENTS) return -EINVAL; if (args->out_sync > 0) { @@ -584,6 +623,8 @@ static const struct file_operations panfrost_drm_driver_fops = { * - 1.0 - initial interface * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO * - 1.2 - adds AFBC_FEATURES query + * - 1.3 - adds JD_REQ_CYCLE_COUNT job requirement for SUBMIT + * - adds SYSTEM_TIMESTAMP and SYSTEM_TIMESTAMP_FREQUENCY queries */ static const struct drm_driver panfrost_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, @@ -597,7 +638,7 @@ static const struct drm_driver panfrost_drm_driver = { .desc = "panfrost DRM", .date = "20180908", .major = 1, - .minor = 2, + .minor = 3, .gem_create_object = panfrost_gem_create_object, 
.gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table, diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index fd8e44992184..f19f918e2330 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -380,6 +380,18 @@ unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev) return ((u64)hi << 32) | lo; } +unsigned long long panfrost_timestamp_read(struct panfrost_device *pfdev) +{ + u32 hi, lo; + + do { + hi = gpu_read(pfdev, GPU_TIMESTAMP_HI); + lo = gpu_read(pfdev, GPU_TIMESTAMP_LO); + } while (hi != gpu_read(pfdev, GPU_TIMESTAMP_HI)); + + return ((u64)hi << 32) | lo; +} + static u64 panfrost_get_core_mask(struct panfrost_device *pfdev) { u64 core_mask; diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h index d841b86504ea..b4fef11211d5 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.h +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h @@ -20,6 +20,7 @@ void panfrost_gpu_suspend_irq(struct panfrost_device *pfdev); void panfrost_cycle_counter_get(struct panfrost_device *pfdev); void panfrost_cycle_counter_put(struct panfrost_device *pfdev); unsigned long long panfrost_cycle_counter_read(struct panfrost_device *pfdev); +unsigned long long panfrost_timestamp_read(struct panfrost_device *pfdev); void panfrost_gpu_amlogic_quirk(struct panfrost_device *pfdev); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index df49d37d0e7e..9b8e82fb8bc4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -159,16 +159,17 @@ panfrost_dequeue_job(struct panfrost_device *pfdev, int slot) struct panfrost_job *job = pfdev->jobs[slot][0]; WARN_ON(!job); - if (job->is_profiled) { - if (job->engine_usage) { - job->engine_usage->elapsed_ns[slot] += - ktime_to_ns(ktime_sub(ktime_get(), job->start_time)); - job->engine_usage->cycles[slot] += - panfrost_cycle_counter_read(pfdev) - job->start_cycles; - } - panfrost_cycle_counter_put(job->pfdev); + + if (job->is_profiled && job->engine_usage) { + job->engine_usage->elapsed_ns[slot] += + ktime_to_ns(ktime_sub(ktime_get(), job->start_time)); + job->engine_usage->cycles[slot] += + panfrost_cycle_counter_read(pfdev) - job->start_cycles; } + if (job->requirements & PANFROST_JD_REQ_CYCLE_COUNT || job->is_profiled) + panfrost_cycle_counter_put(pfdev); + pfdev->jobs[slot][0] = pfdev->jobs[slot][1]; pfdev->jobs[slot][1] = NULL; @@ -243,9 +244,13 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) subslot = panfrost_enqueue_job(pfdev, js, job); /* Don't queue the job if a reset is in progress */ if (!atomic_read(&pfdev->reset.pending)) { - if (pfdev->profile_mode) { + job->is_profiled = pfdev->profile_mode; + + if (job->requirements & PANFROST_JD_REQ_CYCLE_COUNT || + job->is_profiled) panfrost_cycle_counter_get(pfdev); - job->is_profiled = true; + + if (job->is_profiled) { job->start_time = ktime_get(); job->start_cycles = panfrost_cycle_counter_read(pfdev); } @@ -693,7 +698,8 @@ panfrost_reset(struct panfrost_device *pfdev, spin_lock(&pfdev->js->job_lock); for (i = 0; i < NUM_JOB_SLOTS; i++) { for (j = 0; j < ARRAY_SIZE(pfdev->jobs[0]) && pfdev->jobs[i][j]; j++) { - if (pfdev->jobs[i][j]->is_profiled) + if (pfdev->jobs[i][j]->requirements & PANFROST_JD_REQ_CYCLE_COUNT || + pfdev->jobs[i][j]->is_profiled) panfrost_cycle_counter_put(pfdev->jobs[i][j]->pfdev); pm_runtime_put_noidle(pfdev->dev); 
panfrost_devfreq_record_idle(&pfdev->pfdevfreq); @@ -727,7 +733,7 @@ panfrost_reset(struct panfrost_device *pfdev, /* Restart the schedulers */ for (i = 0; i < NUM_JOB_SLOTS; i++) - drm_sched_start(&pfdev->js->queue[i].sched); + drm_sched_start(&pfdev->js->queue[i].sched, 0); /* Re-enable job interrupts now that everything has been restarted. */ job_write(pfdev, JOB_INT_MASK, diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h index c25743b05c55..c7bba476ab3f 100644 --- a/drivers/gpu/drm/panfrost/panfrost_regs.h +++ b/drivers/gpu/drm/panfrost/panfrost_regs.h @@ -78,6 +78,8 @@ #define GPU_CYCLE_COUNT_LO 0x90 #define GPU_CYCLE_COUNT_HI 0x94 +#define GPU_TIMESTAMP_LO 0x98 +#define GPU_TIMESTAMP_HI 0x9C #define GPU_THREAD_MAX_THREADS 0x0A0 /* (RO) Maximum number of threads per core */ #define GPU_THREAD_MAX_WORKGROUP_SIZE 0x0A4 /* (RO) Maximum workgroup size */ diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c index 34182f67136c..0caf9e9a8c45 100644 --- a/drivers/gpu/drm/panthor/panthor_drv.c +++ b/drivers/gpu/drm/panthor/panthor_drv.c @@ -3,6 +3,10 @@ /* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */ /* Copyright 2019 Collabora ltd. */ +#ifdef CONFIG_ARM_ARCH_TIMER +#include <asm/arch_timer.h> +#endif + #include <linux/list.h> #include <linux/module.h> #include <linux/of_platform.h> @@ -165,6 +169,7 @@ panthor_get_uobj_array(const struct drm_panthor_obj_array *in, u32 min_stride, _Generic(_obj_name, \ PANTHOR_UOBJ_DECL(struct drm_panthor_gpu_info, tiler_present), \ PANTHOR_UOBJ_DECL(struct drm_panthor_csif_info, pad), \ + PANTHOR_UOBJ_DECL(struct drm_panthor_timestamp_info, current_timestamp), \ PANTHOR_UOBJ_DECL(struct drm_panthor_sync_op, timeline_value), \ PANTHOR_UOBJ_DECL(struct drm_panthor_queue_submit, syncs), \ PANTHOR_UOBJ_DECL(struct drm_panthor_queue_create, ringbuf_size), \ @@ -751,10 +756,33 @@ static void panthor_submit_ctx_cleanup(struct panthor_submit_ctx *ctx, kvfree(ctx->jobs); } +static int panthor_query_timestamp_info(struct panthor_device *ptdev, + struct drm_panthor_timestamp_info *arg) +{ + int ret; + + ret = pm_runtime_resume_and_get(ptdev->base.dev); + if (ret) + return ret; + +#ifdef CONFIG_ARM_ARCH_TIMER + arg->timestamp_frequency = arch_timer_get_cntfrq(); +#else + arg->timestamp_frequency = 0; +#endif + arg->current_timestamp = panthor_gpu_read_timestamp(ptdev); + arg->timestamp_offset = panthor_gpu_read_timestamp_offset(ptdev); + + pm_runtime_put(ptdev->base.dev); + return 0; +} + static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct drm_file *file) { struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base); struct drm_panthor_dev_query *args = data; + struct drm_panthor_timestamp_info timestamp_info; + int ret; if (!args->pointer) { switch (args->type) { @@ -766,6 +794,10 @@ static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct d args->size = sizeof(ptdev->csif_info); return 0; + case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO: + args->size = sizeof(timestamp_info); + return 0; + default: return -EINVAL; } @@ -778,6 +810,14 @@ static int panthor_ioctl_dev_query(struct drm_device *ddev, void *data, struct d case DRM_PANTHOR_DEV_QUERY_CSIF_INFO: return PANTHOR_UOBJ_SET(args->pointer, args->size, ptdev->csif_info); + case DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO: + ret = panthor_query_timestamp_info(ptdev, &timestamp_info); + + if (ret) + return ret; + + return PANTHOR_UOBJ_SET(args->pointer, args->size, 
timestamp_info); + default: return -EINVAL; } @@ -1395,6 +1435,7 @@ static void panthor_debugfs_init(struct drm_minor *minor) /* * PanCSF driver version: * - 1.0 - initial interface + * - 1.1 - adds DEV_QUERY_TIMESTAMP_INFO query */ static const struct drm_driver panthor_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ | @@ -1408,7 +1449,7 @@ static const struct drm_driver panthor_drm_driver = { .desc = "Panthor DRM driver", .date = "20230801", .major = 1, - .minor = 0, + .minor = 1, .gem_create_object = panthor_gem_create_object, .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c index 5251d8764e7d..2d3529a0b156 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.c +++ b/drivers/gpu/drm/panthor/panthor_gpu.c @@ -480,3 +480,50 @@ void panthor_gpu_resume(struct panthor_device *ptdev) panthor_gpu_irq_resume(&ptdev->gpu->irq, GPU_INTERRUPTS_MASK); panthor_gpu_l2_power_on(ptdev); } + +/** + * panthor_gpu_read_64bit_counter() - Read a 64-bit counter at a given offset. + * @ptdev: Device. + * @reg: The offset of the register to read. + * + * Return: The counter value. + */ +static u64 +panthor_gpu_read_64bit_counter(struct panthor_device *ptdev, u32 reg) +{ + u32 hi, lo; + + do { + hi = gpu_read(ptdev, reg + 0x4); + lo = gpu_read(ptdev, reg); + } while (hi != gpu_read(ptdev, reg + 0x4)); + + return ((u64)hi << 32) | lo; +} + +/** + * panthor_gpu_read_timestamp() - Read the timestamp register. + * @ptdev: Device. + * + * Return: The GPU timestamp value. + */ +u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev) +{ + return panthor_gpu_read_64bit_counter(ptdev, GPU_TIMESTAMP_LO); +} + +/** + * panthor_gpu_read_timestamp_offset() - Read the timestamp offset register. + * @ptdev: Device. + * + * Return: The GPU timestamp offset value. + */ +u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev) +{ + u32 hi, lo; + + hi = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_HI); + lo = gpu_read(ptdev, GPU_TIMESTAMP_OFFSET_LO); + + return ((u64)hi << 32) | lo; +} diff --git a/drivers/gpu/drm/panthor/panthor_gpu.h b/drivers/gpu/drm/panthor/panthor_gpu.h index bba7555dd3c6..7f6133a66127 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.h +++ b/drivers/gpu/drm/panthor/panthor_gpu.h @@ -5,6 +5,8 @@ #ifndef __PANTHOR_GPU_H__ #define __PANTHOR_GPU_H__ +#include <linux/types.h> + struct panthor_device; int panthor_gpu_init(struct panthor_device *ptdev); @@ -48,5 +50,7 @@ int panthor_gpu_l2_power_on(struct panthor_device *ptdev); int panthor_gpu_flush_caches(struct panthor_device *ptdev, u32 l2, u32 lsc, u32 other); int panthor_gpu_soft_reset(struct panthor_device *ptdev); +u64 panthor_gpu_read_timestamp(struct panthor_device *ptdev); +u64 panthor_gpu_read_timestamp_offset(struct panthor_device *ptdev); #endif diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index bbc12728437f..37f1885c54c7 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -833,7 +833,7 @@ static void panthor_vm_stop(struct panthor_vm *vm) static void panthor_vm_start(struct panthor_vm *vm) { - drm_sched_start(&vm->sched); + drm_sched_start(&vm->sched, 0); } /** @@ -2708,9 +2708,9 @@ int panthor_mmu_init(struct panthor_device *ptdev) * which passes iova as an unsigned long. Patch the mmu_features to reflect this * limitation. 
*/ - if (sizeof(unsigned long) * 8 < va_bits) { + if (va_bits > BITS_PER_LONG) { ptdev->gpu_info.mmu_features &= ~GENMASK(7, 0); - ptdev->gpu_info.mmu_features |= sizeof(unsigned long) * 8; + ptdev->gpu_info.mmu_features |= BITS_PER_LONG; } return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq); diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index 91a31b70c037..42afdf0ddb7e 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -2538,7 +2538,7 @@ static void queue_start(struct panthor_queue *queue) list_for_each_entry(job, &queue->scheduler.pending_list, base.list) job->base.s_fence->parent = dma_fence_get(job->done_fence); - drm_sched_start(&queue->scheduler); + drm_sched_start(&queue->scheduler, 0); } static void panthor_group_stop(struct panthor_group *group) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index ab53ab486fe6..f093616fe53c 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -674,9 +674,10 @@ EXPORT_SYMBOL(drm_sched_stop); * drm_sched_start - recover jobs after a reset * * @sched: scheduler instance + * @errno: error to set on the pending fences * */ -void drm_sched_start(struct drm_gpu_scheduler *sched) +void drm_sched_start(struct drm_gpu_scheduler *sched, int errno) { struct drm_sched_job *s_job, *tmp; @@ -691,13 +692,13 @@ void drm_sched_start(struct drm_gpu_scheduler *sched) atomic_add(s_job->credits, &sched->credit_count); if (!fence) { - drm_sched_job_done(s_job, -ECANCELED); + drm_sched_job_done(s_job, errno ?: -ECANCELED); continue; } if (dma_fence_add_callback(fence, &s_job->cb, drm_sched_job_done_cb)) - drm_sched_job_done(s_job, fence->error); + drm_sched_job_done(s_job, fence->error ?: errno); } drm_sched_start_timeout_unlocked(sched); diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index b4eb030ea961..d275404ad0e9 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -76,8 +76,8 @@ static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_ /* * Imported buffers need special treatment to satisfy the semantics of DMA-BUF. */ - if (gem->import_attach) { - struct dma_buf *buf = gem->import_attach->dmabuf; + if (obj->dma_buf) { + struct dma_buf *buf = obj->dma_buf; map->attach = dma_buf_attach(buf, dev); if (IS_ERR(map->attach)) { @@ -184,8 +184,8 @@ static void *tegra_bo_mmap(struct host1x_bo *bo) if (obj->vaddr) return obj->vaddr; - if (obj->gem.import_attach) { - ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map); + if (obj->dma_buf) { + ret = dma_buf_vmap_unlocked(obj->dma_buf, &map); if (ret < 0) return ERR_PTR(ret); @@ -208,8 +208,8 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr) if (obj->vaddr) return; - if (obj->gem.import_attach) - return dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map); + if (obj->dma_buf) + return dma_buf_vunmap_unlocked(obj->dma_buf, &map); vunmap(addr); } @@ -465,27 +465,32 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm, if (IS_ERR(bo)) return bo; - attach = dma_buf_attach(buf, drm->dev); - if (IS_ERR(attach)) { - err = PTR_ERR(attach); - goto free; - } - - get_dma_buf(buf); + /* + * If we need to use IOMMU API to map the dma-buf into the internally managed + * domain, map it first to the DRM device to get an sgt. 
+ */ + if (tegra->domain) { + attach = dma_buf_attach(buf, drm->dev); + if (IS_ERR(attach)) { + err = PTR_ERR(attach); + goto free; + } - bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE); - if (IS_ERR(bo->sgt)) { - err = PTR_ERR(bo->sgt); - goto detach; - } + bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE); + if (IS_ERR(bo->sgt)) { + err = PTR_ERR(bo->sgt); + goto detach; + } - if (tegra->domain) { err = tegra_bo_iommu_map(tegra, bo); if (err < 0) goto detach; + + bo->gem.import_attach = attach; } - bo->gem.import_attach = attach; + get_dma_buf(buf); + bo->dma_buf = buf; return bo; @@ -516,17 +521,21 @@ void tegra_bo_free_object(struct drm_gem_object *gem) dev_name(mapping->dev)); } - if (tegra->domain) + if (tegra->domain) { tegra_bo_iommu_unmap(tegra, bo); - if (gem->import_attach) { - dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt, - DMA_TO_DEVICE); - drm_prime_gem_destroy(gem, NULL); - } else { - tegra_bo_free(gem->dev, bo); + if (gem->import_attach) { + dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt, + DMA_TO_DEVICE); + dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach); + } } + tegra_bo_free(gem->dev, bo); + + if (bo->dma_buf) + dma_buf_put(bo->dma_buf); + drm_gem_object_release(gem); kfree(bo); } diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h index cb5146a67668..bf2cbd48eb3f 100644 --- a/drivers/gpu/drm/tegra/gem.h +++ b/drivers/gpu/drm/tegra/gem.h @@ -32,6 +32,26 @@ struct tegra_bo_tiling { enum tegra_bo_sector_layout sector_layout; }; +/* + * How memory is referenced within a tegra_bo: + * + * Buffer source | Mapping API(*) | Fields + * ---------------+-----------------+--------------- + * Allocated here | DMA API | iova (IOVA mapped to drm->dev), vaddr (CPU VA) + * + * Allocated here | IOMMU API | pages/num_pages (Phys. memory), sgt (Mapped to drm->dev), + * | iova/size (Mapped to domain) + * + * Imported | DMA API | dma_buf (Imported dma_buf) + * + * Imported | IOMMU API | dma_buf (Imported dma_buf), + * | gem->import_attach (Attachment on drm->dev), + * | sgt (Mapped to drm->dev) + * | iova/size (Mapped to domain) + * + * (*) If tegra->domain is set, i.e. TegraDRM IOMMU domain is directly managed through IOMMU API, + * this is IOMMU API. Otherwise DMA API. 
+ */ struct tegra_bo { struct drm_gem_object gem; struct host1x_bo base; @@ -39,6 +59,7 @@ struct tegra_bo { struct sg_table *sgt; dma_addr_t iova; void *vaddr; + struct dma_buf *dma_buf; struct drm_mm_node *mm; unsigned long num_pages; diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c index 4de1ea0fc7c0..00c8564520e7 100644 --- a/drivers/gpu/drm/tegra/gr3d.c +++ b/drivers/gpu/drm/tegra/gr3d.c @@ -46,7 +46,6 @@ struct gr3d { unsigned int nclocks; struct reset_control_bulk_data resets[RST_GR3D_MAX]; unsigned int nresets; - struct dev_pm_domain_list *pd_list; DECLARE_BITMAP(addr_regs, GR3D_NUM_REGS); }; @@ -370,12 +369,18 @@ static int gr3d_power_up_legacy_domain(struct device *dev, const char *name, return 0; } +static void gr3d_del_link(void *link) +{ + device_link_del(link); +} + static int gr3d_init_power(struct device *dev, struct gr3d *gr3d) { - struct dev_pm_domain_attach_data pd_data = { - .pd_names = (const char *[]) { "3d0", "3d1" }, - .num_pd_names = 2, - }; + static const char * const opp_genpd_names[] = { "3d0", "3d1", NULL }; + const u32 link_flags = DL_FLAG_STATELESS | DL_FLAG_PM_RUNTIME; + struct device **opp_virt_devs, *pd_dev; + struct device_link *link; + unsigned int i; int err; err = of_count_phandle_with_args(dev->of_node, "power-domains", @@ -409,10 +414,29 @@ static int gr3d_init_power(struct device *dev, struct gr3d *gr3d) if (dev->pm_domain) return 0; - err = dev_pm_domain_attach_list(dev, &pd_data, &gr3d->pd_list); - if (err < 0) + err = devm_pm_opp_attach_genpd(dev, opp_genpd_names, &opp_virt_devs); + if (err) return err; + for (i = 0; opp_genpd_names[i]; i++) { + pd_dev = opp_virt_devs[i]; + if (!pd_dev) { + dev_err(dev, "failed to get %s power domain\n", + opp_genpd_names[i]); + return -EINVAL; + } + + link = device_link_add(dev, pd_dev, link_flags); + if (!link) { + dev_err(dev, "failed to link to %s\n", dev_name(pd_dev)); + return -EINVAL; + } + + err = devm_add_action_or_reset(dev, gr3d_del_link, link); + if (err) + return err; + } + return 0; } @@ -503,13 +527,13 @@ static int gr3d_probe(struct platform_device *pdev) err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev); if (err) - goto err; + return err; err = host1x_client_register(&gr3d->client.base); if (err < 0) { dev_err(&pdev->dev, "failed to register host1x client: %d\n", err); - goto err; + return err; } /* initialize address register map */ @@ -517,9 +541,6 @@ static int gr3d_probe(struct platform_device *pdev) set_bit(gr3d_addr_regs[i], gr3d->addr_regs); return 0; -err: - dev_pm_domain_detach_list(gr3d->pd_list); - return err; } static void gr3d_remove(struct platform_device *pdev) @@ -528,7 +549,6 @@ static void gr3d_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); host1x_client_unregister(&gr3d->client.base); - dev_pm_domain_detach_list(gr3d->pd_list); } static int __maybe_unused gr3d_runtime_suspend(struct device *dev) diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 09987e372e3e..6bf2dae82ca0 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -434,7 +434,7 @@ tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pix_clock, static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi) { - const unsigned int freqs[] = { + static const unsigned int freqs[] = { 32000, 44100, 48000, 88200, 96000, 176400, 192000 }; unsigned int i; diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index f6889f649bc1..8f206c6387ec 100644 --- 
a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -13,10 +13,8 @@ config DRM_ARCPGU config DRM_BOCHS tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" depends on DRM && PCI && MMU + select DRM_GEM_SHMEM_HELPER select DRM_KMS_HELPER - select DRM_VRAM_HELPER - select DRM_TTM - select DRM_TTM_HELPER help This is a KMS driver for qemu's stdvga output. Choose this option for qemu. diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c index 31fc5d839e10..69c5f65e9853 100644 --- a/drivers/gpu/drm/tiny/bochs.c +++ b/drivers/gpu/drm/tiny/bochs.c @@ -4,18 +4,21 @@ #include <linux/pci.h> #include <drm/drm_aperture.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_damage_helper.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> -#include <drm/drm_fbdev_ttm.h> +#include <drm/drm_fbdev_shmem.h> #include <drm/drm_fourcc.h> #include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> #include <drm/drm_gem_framebuffer_helper.h> -#include <drm/drm_gem_vram_helper.h> +#include <drm/drm_gem_shmem_helper.h> #include <drm/drm_managed.h> #include <drm/drm_module.h> +#include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_simple_kms_helper.h> #include <video/vga.h> @@ -71,6 +74,8 @@ enum bochs_types { }; struct bochs_device { + struct drm_device dev; + /* hw */ void __iomem *mmio; int ioports; @@ -85,14 +90,19 @@ struct bochs_device { u16 yres_virtual; u32 stride; u32 bpp; - const struct drm_edid *drm_edid; /* drm */ - struct drm_device *dev; - struct drm_simple_display_pipe pipe; + struct drm_plane primary_plane; + struct drm_crtc crtc; + struct drm_encoder encoder; struct drm_connector connector; }; +static struct bochs_device *to_bochs_device(const struct drm_device *dev) +{ + return container_of(dev, struct bochs_device, dev); +} + /* ---------------------------------------------------------------------- */ static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val) @@ -172,12 +182,14 @@ static void bochs_hw_set_little_endian(struct bochs_device *bochs) #define bochs_hw_set_native_endian(_b) bochs_hw_set_little_endian(_b) #endif -static int bochs_get_edid_block(void *data, u8 *buf, - unsigned int block, size_t len) +static int bochs_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) { struct bochs_device *bochs = data; size_t i, start = block * EDID_LENGTH; + if (!bochs->mmio) + return -1; + if (start + len > 0x400 /* vga register offset */) return -1; @@ -187,43 +199,38 @@ static int bochs_get_edid_block(void *data, u8 *buf, return 0; } -static int bochs_hw_load_edid(struct bochs_device *bochs) +static const struct drm_edid *bochs_hw_read_edid(struct drm_connector *connector) { + struct drm_device *dev = connector->dev; + struct bochs_device *bochs = to_bochs_device(dev); u8 header[8]; - if (!bochs->mmio) - return -1; - /* check header to detect whenever edid support is enabled in qemu */ bochs_get_edid_block(bochs, header, 0, ARRAY_SIZE(header)); if (drm_edid_header_is_valid(header) != 8) - return -1; + return NULL; - drm_edid_free(bochs->drm_edid); - bochs->drm_edid = drm_edid_read_custom(&bochs->connector, - bochs_get_edid_block, bochs); - if (!bochs->drm_edid) - return -1; + drm_dbg(dev, "Found EDID data blob.\n"); - return 0; + return drm_edid_read_custom(connector, bochs_get_edid_block, bochs); } -static int bochs_hw_init(struct drm_device *dev) +static int bochs_hw_init(struct bochs_device *bochs) { - struct bochs_device *bochs = 
dev->dev_private; + struct drm_device *dev = &bochs->dev; struct pci_dev *pdev = to_pci_dev(dev->dev); unsigned long addr, size, mem, ioaddr, iosize; u16 id; if (pdev->resource[2].flags & IORESOURCE_MEM) { + ioaddr = pci_resource_start(pdev, 2); + iosize = pci_resource_len(pdev, 2); /* mmio bar with vga and bochs registers present */ - if (pci_request_region(pdev, 2, "bochs-drm") != 0) { + if (!devm_request_mem_region(&pdev->dev, ioaddr, iosize, "bochs-drm")) { DRM_ERROR("Cannot request mmio region\n"); return -EBUSY; } - ioaddr = pci_resource_start(pdev, 2); - iosize = pci_resource_len(pdev, 2); - bochs->mmio = ioremap(ioaddr, iosize); + bochs->mmio = devm_ioremap(&pdev->dev, ioaddr, iosize); if (bochs->mmio == NULL) { DRM_ERROR("Cannot map mmio region\n"); return -ENOMEM; @@ -231,7 +238,7 @@ static int bochs_hw_init(struct drm_device *dev) } else { ioaddr = VBE_DISPI_IOPORT_INDEX; iosize = 2; - if (!request_region(ioaddr, iosize, "bochs-drm")) { + if (!devm_request_region(&pdev->dev, ioaddr, iosize, "bochs-drm")) { DRM_ERROR("Cannot request ioports\n"); return -EBUSY; } @@ -258,10 +265,10 @@ static int bochs_hw_init(struct drm_device *dev) size = min(size, mem); } - if (pci_request_region(pdev, 0, "bochs-drm") != 0) + if (!devm_request_mem_region(&pdev->dev, addr, size, "bochs-drm")) DRM_WARN("Cannot request framebuffer, boot fb still active?\n"); - bochs->fb_map = ioremap(addr, size); + bochs->fb_map = devm_ioremap(&pdev->dev, addr, size); if (bochs->fb_map == NULL) { DRM_ERROR("Cannot map framebuffer\n"); return -ENOMEM; @@ -290,22 +297,6 @@ noext: return 0; } -static void bochs_hw_fini(struct drm_device *dev) -{ - struct bochs_device *bochs = dev->dev_private; - - /* TODO: shot down existing vram mappings */ - - if (bochs->mmio) - iounmap(bochs->mmio); - if (bochs->ioports) - release_region(VBE_DISPI_IOPORT_INDEX, 2); - if (bochs->fb_map) - iounmap(bochs->fb_map); - pci_release_regions(to_pci_dev(dev->dev)); - drm_edid_free(bochs->drm_edid); -} - static void bochs_hw_blank(struct bochs_device *bochs, bool blank) { DRM_DEBUG_DRIVER("hw_blank %d\n", blank); @@ -321,7 +312,7 @@ static void bochs_hw_setmode(struct bochs_device *bochs, struct drm_display_mode { int idx; - if (!drm_dev_enter(bochs->dev, &idx)) + if (!drm_dev_enter(&bochs->dev, &idx)) return; bochs->xres = mode->hdisplay; @@ -357,7 +348,7 @@ static void bochs_hw_setformat(struct bochs_device *bochs, const struct drm_form { int idx; - if (!drm_dev_enter(bochs->dev, &idx)) + if (!drm_dev_enter(&bochs->dev, &idx)) return; DRM_DEBUG_DRIVER("format %c%c%c%c\n", @@ -388,7 +379,7 @@ static void bochs_hw_setbase(struct bochs_device *bochs, int x, int y, int strid unsigned long offset; unsigned int vx, vy, vwidth, idx; - if (!drm_dev_enter(bochs->dev, &idx)) + if (!drm_dev_enter(&bochs->dev, &idx)) return; bochs->stride = stride; @@ -410,83 +401,156 @@ static void bochs_hw_setbase(struct bochs_device *bochs, int x, int y, int strid /* ---------------------------------------------------------------------- */ -static const uint32_t bochs_formats[] = { +static const uint32_t bochs_primary_plane_formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_BGRX8888, }; -static void bochs_plane_update(struct bochs_device *bochs, struct drm_plane_state *state) +static int bochs_primary_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) { - struct drm_gem_vram_object *gbo; - s64 gpu_addr; + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_crtc *new_crtc = 
new_plane_state->crtc; + struct drm_crtc_state *new_crtc_state = NULL; + int ret; + + if (new_crtc) + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); + + ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, false); + if (ret) + return ret; + else if (!new_plane_state->visible) + return 0; + + return 0; +} - if (!state->fb || !bochs->stride) +static void bochs_primary_plane_helper_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_device *dev = plane->dev; + struct bochs_device *bochs = to_bochs_device(dev); + struct drm_plane_state *plane_state = plane->state; + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); + struct drm_framebuffer *fb = plane_state->fb; + struct drm_atomic_helper_damage_iter iter; + struct drm_rect damage; + + if (!fb || !bochs->stride) return; - gbo = drm_gem_vram_of_gem(state->fb->obj[0]); - gpu_addr = drm_gem_vram_offset(gbo); - if (WARN_ON_ONCE(gpu_addr < 0)) - return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */ + drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state); + drm_atomic_for_each_plane_damage(&iter, &damage) { + struct iosys_map dst = IOSYS_MAP_INIT_VADDR_IOMEM(bochs->fb_map); + + iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, &damage)); + drm_fb_memcpy(&dst, fb->pitches, shadow_plane_state->data, fb, &damage); + } + /* Always scanout image at VRAM offset 0 */ bochs_hw_setbase(bochs, - state->crtc_x, - state->crtc_y, - state->fb->pitches[0], - state->fb->offsets[0] + gpu_addr); - bochs_hw_setformat(bochs, state->fb->format); + plane_state->crtc_x, + plane_state->crtc_y, + fb->pitches[0], + 0); + bochs_hw_setformat(bochs, fb->format); } -static void bochs_pipe_enable(struct drm_simple_display_pipe *pipe, - struct drm_crtc_state *crtc_state, - struct drm_plane_state *plane_state) +static const struct drm_plane_helper_funcs bochs_primary_plane_helper_funcs = { + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, + .atomic_check = bochs_primary_plane_helper_atomic_check, + .atomic_update = bochs_primary_plane_helper_atomic_update, +}; + +static const struct drm_plane_funcs bochs_primary_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + DRM_GEM_SHADOW_PLANE_FUNCS +}; + +static void bochs_crtc_helper_mode_set_nofb(struct drm_crtc *crtc) { - struct bochs_device *bochs = pipe->crtc.dev->dev_private; + struct bochs_device *bochs = to_bochs_device(crtc->dev); + struct drm_crtc_state *crtc_state = crtc->state; bochs_hw_setmode(bochs, &crtc_state->mode); - bochs_plane_update(bochs, plane_state); } -static void bochs_pipe_disable(struct drm_simple_display_pipe *pipe) +static int bochs_crtc_helper_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) { - struct bochs_device *bochs = pipe->crtc.dev->dev_private; + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); - bochs_hw_blank(bochs, true); + if (!crtc_state->enable) + return 0; + + return drm_atomic_helper_check_crtc_primary_plane(crtc_state); } -static void bochs_pipe_update(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *old_state) +static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) { - struct bochs_device 
*bochs = pipe->crtc.dev->dev_private; +} + +static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *crtc_state) +{ + struct bochs_device *bochs = to_bochs_device(crtc->dev); - bochs_plane_update(bochs, pipe->plane.state); + bochs_hw_blank(bochs, true); } -static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = { - .enable = bochs_pipe_enable, - .disable = bochs_pipe_disable, - .update = bochs_pipe_update, - .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb, - .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb, +static const struct drm_crtc_helper_funcs bochs_crtc_helper_funcs = { + .mode_set_nofb = bochs_crtc_helper_mode_set_nofb, + .atomic_check = bochs_crtc_helper_atomic_check, + .atomic_enable = bochs_crtc_helper_atomic_enable, + .atomic_disable = bochs_crtc_helper_atomic_disable, +}; + +static const struct drm_crtc_funcs bochs_crtc_funcs = { + .reset = drm_atomic_helper_crtc_reset, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static const struct drm_encoder_funcs bochs_encoder_funcs = { + .destroy = drm_encoder_cleanup, }; -static int bochs_connector_get_modes(struct drm_connector *connector) +static int bochs_connector_helper_get_modes(struct drm_connector *connector) { + const struct drm_edid *edid; int count; - count = drm_edid_connector_add_modes(connector); + edid = bochs_hw_read_edid(connector); - if (!count) { + if (edid) { + drm_edid_connector_update(connector, edid); + count = drm_edid_connector_add_modes(connector); + drm_edid_free(edid); + } else { + drm_edid_connector_update(connector, NULL); count = drm_add_modes_noedid(connector, 8192, 8192); drm_set_preferred_mode(connector, defx, defy); } + return count; } -static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = { - .get_modes = bochs_connector_get_modes, +static const struct drm_connector_helper_funcs bochs_connector_helper_funcs = { + .get_modes = bochs_connector_helper_get_modes, }; -static const struct drm_connector_funcs bochs_connector_connector_funcs = { +static const struct drm_connector_funcs bochs_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .destroy = drm_connector_cleanup, .reset = drm_atomic_helper_connector_reset, @@ -494,68 +558,89 @@ static const struct drm_connector_funcs bochs_connector_connector_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static void bochs_connector_init(struct drm_device *dev) +static enum drm_mode_status bochs_mode_config_mode_valid(struct drm_device *dev, + const struct drm_display_mode *mode) { - struct bochs_device *bochs = dev->dev_private; - struct drm_connector *connector = &bochs->connector; - - drm_connector_init(dev, connector, &bochs_connector_connector_funcs, - DRM_MODE_CONNECTOR_VIRTUAL); - drm_connector_helper_add(connector, &bochs_connector_connector_helper_funcs); - - bochs_hw_load_edid(bochs); - if (bochs->drm_edid) { - DRM_INFO("Found EDID data blob.\n"); - drm_connector_attach_edid_property(connector); - drm_edid_connector_update(&bochs->connector, bochs->drm_edid); - } -} + struct bochs_device *bochs = to_bochs_device(dev); + const struct drm_format_info *format = drm_format_info(DRM_FORMAT_XRGB8888); + u64 pitch; -static struct drm_framebuffer * -bochs_gem_fb_create(struct drm_device *dev, 
struct drm_file *file, - const struct drm_mode_fb_cmd2 *mode_cmd) -{ - if (mode_cmd->pixel_format != DRM_FORMAT_XRGB8888 && - mode_cmd->pixel_format != DRM_FORMAT_BGRX8888) - return ERR_PTR(-EINVAL); + if (drm_WARN_ON(dev, !format)) + return MODE_ERROR; - return drm_gem_fb_create(dev, file, mode_cmd); + pitch = drm_format_info_min_pitch(format, 0, mode->hdisplay); + if (!pitch) + return MODE_BAD_WIDTH; + if (mode->vdisplay > DIV_ROUND_DOWN_ULL(bochs->fb_size, pitch)) + return MODE_MEM; + + return MODE_OK; } -static const struct drm_mode_config_funcs bochs_mode_funcs = { - .fb_create = bochs_gem_fb_create, - .mode_valid = drm_vram_helper_mode_valid, +static const struct drm_mode_config_funcs bochs_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .mode_valid = bochs_mode_config_mode_valid, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; static int bochs_kms_init(struct bochs_device *bochs) { + struct drm_device *dev = &bochs->dev; + struct drm_plane *primary_plane; + struct drm_crtc *crtc; + struct drm_connector *connector; + struct drm_encoder *encoder; int ret; - ret = drmm_mode_config_init(bochs->dev); + ret = drmm_mode_config_init(dev); if (ret) return ret; - bochs->dev->mode_config.max_width = 8192; - bochs->dev->mode_config.max_height = 8192; + dev->mode_config.max_width = 8192; + dev->mode_config.max_height = 8192; + + dev->mode_config.preferred_depth = 24; + dev->mode_config.quirk_addfb_prefer_host_byte_order = true; + + dev->mode_config.funcs = &bochs_mode_config_funcs; + + primary_plane = &bochs->primary_plane; + ret = drm_universal_plane_init(dev, primary_plane, 0, + &bochs_primary_plane_funcs, + bochs_primary_plane_formats, + ARRAY_SIZE(bochs_primary_plane_formats), + NULL, + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) + return ret; + drm_plane_helper_add(primary_plane, &bochs_primary_plane_helper_funcs); + drm_plane_enable_fb_damage_clips(primary_plane); - bochs->dev->mode_config.preferred_depth = 24; - bochs->dev->mode_config.prefer_shadow = 0; - bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true; + crtc = &bochs->crtc; + ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL, + &bochs_crtc_funcs, NULL); + if (ret) + return ret; + drm_crtc_helper_add(crtc, &bochs_crtc_helper_funcs); - bochs->dev->mode_config.funcs = &bochs_mode_funcs; + encoder = &bochs->encoder; + ret = drm_encoder_init(dev, encoder, &bochs_encoder_funcs, + DRM_MODE_ENCODER_VIRTUAL, NULL); + if (ret) + return ret; + encoder->possible_crtcs = drm_crtc_mask(crtc); - bochs_connector_init(bochs->dev); - drm_simple_display_pipe_init(bochs->dev, - &bochs->pipe, - &bochs_pipe_funcs, - bochs_formats, - ARRAY_SIZE(bochs_formats), - NULL, - &bochs->connector); + connector = &bochs->connector; + ret = drm_connector_init(dev, connector, &bochs_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL); + if (ret) + return ret; + drm_connector_helper_add(connector, &bochs_connector_helper_funcs); + drm_connector_attach_edid_property(connector); + drm_connector_attach_encoder(connector, encoder); - drm_mode_config_reset(bochs->dev); + drm_mode_config_reset(dev); return 0; } @@ -563,34 +648,19 @@ static int bochs_kms_init(struct bochs_device *bochs) /* ---------------------------------------------------------------------- */ /* drm interface */ -static int bochs_load(struct drm_device *dev) +static int bochs_load(struct bochs_device *bochs) { - struct bochs_device *bochs; int ret; - bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL); - if (bochs == NULL) 
- return -ENOMEM; - dev->dev_private = bochs; - bochs->dev = dev; - - ret = bochs_hw_init(dev); + ret = bochs_hw_init(bochs); if (ret) return ret; - ret = drmm_vram_helper_init(dev, bochs->fb_base, bochs->fb_size); - if (ret) - goto err_hw_fini; - ret = bochs_kms_init(bochs); if (ret) - goto err_hw_fini; + return ret; return 0; - -err_hw_fini: - bochs_hw_fini(dev); - return ret; } DEFINE_DRM_GEM_FOPS(bochs_fops); @@ -603,7 +673,7 @@ static const struct drm_driver bochs_driver = { .date = "20130925", .major = 1, .minor = 0, - DRM_GEM_VRAM_DRIVER, + DRM_GEM_SHMEM_DRIVER_OPS, }; /* ---------------------------------------------------------------------- */ @@ -635,23 +705,18 @@ static const struct dev_pm_ops bochs_pm_ops = { static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct bochs_device *bochs; struct drm_device *dev; - unsigned long fbsize; int ret; - fbsize = pci_resource_len(pdev, 0); - if (fbsize < 4 * 1024 * 1024) { - DRM_ERROR("less than 4 MB video memory, ignoring device\n"); - return -ENOMEM; - } - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &bochs_driver); if (ret) return ret; - dev = drm_dev_alloc(&bochs_driver, &pdev->dev); - if (IS_ERR(dev)) + bochs = devm_drm_dev_alloc(&pdev->dev, &bochs_driver, struct bochs_device, dev); + if (IS_ERR(bochs)) return PTR_ERR(dev); + dev = &bochs->dev; ret = pcim_enable_device(pdev); if (ret) @@ -659,19 +724,17 @@ static int bochs_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent pci_set_drvdata(pdev, dev); - ret = bochs_load(dev); + ret = bochs_load(bochs); if (ret) goto err_free_dev; ret = drm_dev_register(dev, 0); if (ret) - goto err_hw_fini; + goto err_free_dev; - drm_fbdev_ttm_setup(dev, 32); + drm_fbdev_shmem_setup(dev, 32); return ret; -err_hw_fini: - bochs_hw_fini(dev); err_free_dev: drm_dev_put(dev); return ret; @@ -683,7 +746,6 @@ static void bochs_pci_remove(struct pci_dev *pdev) drm_dev_unplug(dev); drm_atomic_helper_shutdown(dev); - bochs_hw_fini(dev); drm_dev_put(dev); } diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 08d2a2739582..f58c38d7de29 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -667,7 +667,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) /* Unblock schedulers and restart their jobs. */ for (q = 0; q < V3D_MAX_QUEUES; q++) { - drm_sched_start(&v3d->queue[q].sched); + drm_sched_start(&v3d->queue[q].sched, 0); } mutex_unlock(&v3d->reset_lock); diff --git a/drivers/gpu/drm/vc4/tests/vc4_mock.c b/drivers/gpu/drm/vc4/tests/vc4_mock.c index 0731a7d85d7a..6527fb1db71e 100644 --- a/drivers/gpu/drm/vc4/tests/vc4_mock.c +++ b/drivers/gpu/drm/vc4/tests/vc4_mock.c @@ -155,11 +155,11 @@ KUNIT_DEFINE_ACTION_WRAPPER(kunit_action_drm_dev_unregister, drm_dev_unregister, struct drm_device *); -static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5) +static struct vc4_dev *__mock_device(struct kunit *test, enum vc4_gen gen) { struct drm_device *drm; - const struct drm_driver *drv = is_vc5 ? &vc5_drm_driver : &vc4_drm_driver; - const struct vc4_mock_desc *desc = is_vc5 ? &vc5_mock : &vc4_mock; + const struct drm_driver *drv = (gen == VC4_GEN_5) ? &vc5_drm_driver : &vc4_drm_driver; + const struct vc4_mock_desc *desc = (gen == VC4_GEN_5) ? 
&vc5_mock : &vc4_mock; struct vc4_dev *vc4; struct device *dev; int ret; @@ -173,9 +173,9 @@ static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4); vc4->dev = dev; - vc4->is_vc5 = is_vc5; + vc4->gen = gen; - vc4->hvs = __vc4_hvs_alloc(vc4, NULL); + vc4->hvs = __vc4_hvs_alloc(vc4, NULL, NULL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4->hvs); drm = &vc4->base; @@ -198,10 +198,10 @@ static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5) struct vc4_dev *vc4_mock_device(struct kunit *test) { - return __mock_device(test, false); + return __mock_device(test, VC4_GEN_4); } struct vc4_dev *vc5_mock_device(struct kunit *test) { - return __mock_device(test, true); + return __mock_device(test, VC4_GEN_5); } diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 3f72be7490d5..fb450b6a4d44 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -251,7 +251,7 @@ void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; mutex_lock(&vc4->purgeable.lock); @@ -265,7 +265,7 @@ static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; /* list_del_init() is used here because the caller might release @@ -396,7 +396,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_bo *bo; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return ERR_PTR(-ENODEV); bo = kzalloc(sizeof(*bo), GFP_KERNEL); @@ -427,7 +427,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size, struct drm_gem_dma_object *dma_obj; struct vc4_bo *bo; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return ERR_PTR(-ENODEV); if (size == 0) @@ -496,7 +496,7 @@ int vc4_bo_dumb_create(struct drm_file *file_priv, struct vc4_bo *bo = NULL; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; ret = vc4_dumb_fixup_args(args); @@ -622,7 +622,7 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo) struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; /* Fast path: if the BO is already retained by someone, no need to @@ -661,7 +661,7 @@ void vc4_bo_dec_usecnt(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; /* Fast path: if the BO is still retained by someone, no need to test @@ -783,7 +783,7 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo = NULL; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; ret = vc4_grab_bin_bo(vc4, vc4file); @@ -813,7 +813,7 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_vc4_mmap_bo *args = data; struct drm_gem_object *gem_obj; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; gem_obj = drm_gem_object_lookup(file_priv, args->handle); @@ -839,7 +839,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo = NULL; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if 
(WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (args->size == 0) @@ -918,7 +918,7 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo; bool t_format; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (args->flags != 0) @@ -964,7 +964,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_gem_object *gem_obj; struct vc4_bo *bo; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (args->flags != 0 || args->modifier != 0) @@ -1007,7 +1007,7 @@ int vc4_bo_cache_init(struct drm_device *dev) int ret; int i; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; /* Create the initial set of BO labels that the kernel will @@ -1071,7 +1071,7 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data, struct drm_gem_object *gem_obj; int ret = 0, label; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!args->len) diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 8b5a7e5eb146..575900ee67a5 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -105,6 +105,7 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, struct vc4_hvs *hvs = vc4->hvs; struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state); + unsigned int channel = vc4_crtc_state->assigned_channel; unsigned int cob_size; u32 val; int fifo_lines; @@ -121,7 +122,7 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, * Read vertical scanline which is currently composed for our * pixelvalve by the HVS, and also the scaler status. */ - val = HVS_READ(SCALER_DISPSTATX(vc4_crtc_state->assigned_channel)); + val = HVS_READ(SCALER_DISPSTATX(channel)); /* Get optional system timestamp after query. */ if (etime) @@ -137,11 +138,11 @@ static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc, *vpos /= 2; /* Use hpos to correct for field offset in interlaced mode. */ - if (vc4_hvs_get_fifo_frame_count(hvs, vc4_crtc_state->assigned_channel) % 2) + if (vc4_hvs_get_fifo_frame_count(hvs, channel) % 2) *hpos += mode->crtc_htotal / 2; } - cob_size = vc4_crtc_get_cob_allocation(vc4, vc4_crtc_state->assigned_channel); + cob_size = vc4_crtc_get_cob_allocation(vc4, channel); /* This is the offset we need for translating hvs -> pv scanout pos. */ fifo_lines = cob_size / mode->crtc_hdisplay; @@ -263,7 +264,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format) * Removing 1 from the FIFO full level however * seems to completely remove that issue. 
*/ - if (!vc4->is_vc5) + if (vc4->gen == VC4_GEN_4) return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1; return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX; @@ -428,7 +429,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode if (is_dsi) CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep); - if (vc4->is_vc5) + if (vc4->gen == VC4_GEN_5) CRTC_WRITE(PV_MUX_CFG, VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP, PV_MUX_CFG_RGB_PIXEL_MUX_MODE)); @@ -735,10 +736,17 @@ int vc4_crtc_atomic_check(struct drm_crtc *crtc, if (conn_state->crtc != crtc) continue; - vc4_state->margins.left = conn_state->tv.margins.left; - vc4_state->margins.right = conn_state->tv.margins.right; - vc4_state->margins.top = conn_state->tv.margins.top; - vc4_state->margins.bottom = conn_state->tv.margins.bottom; + if (memcmp(&vc4_state->margins, &conn_state->tv.margins, + sizeof(vc4_state->margins))) { + memcpy(&vc4_state->margins, &conn_state->tv.margins, + sizeof(vc4_state->margins)); + + /* + * Need to force the dlist entries for all planes to be + * updated so that the dest rectangles are changed. + */ + crtc_state->zpos_changed = true; + } break; } @@ -913,7 +921,7 @@ static int vc4_async_set_fence_cb(struct drm_device *dev, struct dma_fence *fence; int ret; - if (!vc4->is_vc5) { + if (vc4->gen == VC4_GEN_4) { struct vc4_bo *bo = to_vc4_bo(&dma_bo->base); return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno, @@ -1000,7 +1008,7 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, struct vc4_bo *bo = to_vc4_bo(&dma_bo->base); int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; /* @@ -1043,7 +1051,7 @@ int vc4_page_flip(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - if (vc4->is_vc5) + if (vc4->gen > VC4_GEN_4) return vc5_async_page_flip(crtc, fb, event, flags); else return vc4_async_page_flip(crtc, fb, event, flags); @@ -1338,9 +1346,8 @@ int __vc4_crtc_init(struct drm_device *drm, drm_crtc_helper_add(crtc, crtc_helper_funcs); - if (!vc4->is_vc5) { + if (vc4->gen == VC4_GEN_4) { drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r)); - drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size); /* We support CTM, but only for one CRTC at a time. 
It's therefore diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index c133e96b8aca..8c104ace3dc6 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -98,7 +98,7 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data, if (args->pad != 0) return -EINVAL; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) @@ -147,7 +147,7 @@ static int vc4_open(struct drm_device *dev, struct drm_file *file) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_file *vc4file; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL); @@ -165,7 +165,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_file *vc4file = file->driver_priv; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (vc4file->bin_bo_used) @@ -291,13 +291,17 @@ static int vc4_drm_bind(struct device *dev) struct vc4_dev *vc4; struct device_node *node; struct drm_crtc *crtc; - bool is_vc5; + enum vc4_gen gen; int ret = 0; dev->coherent_dma_mask = DMA_BIT_MASK(32); - is_vc5 = of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5"); - if (is_vc5) + if (of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5")) + gen = VC4_GEN_5; + else + gen = VC4_GEN_4; + + if (gen > VC4_GEN_4) driver = &vc5_drm_driver; else driver = &vc4_drm_driver; @@ -315,13 +319,13 @@ static int vc4_drm_bind(struct device *dev) vc4 = devm_drm_dev_alloc(dev, driver, struct vc4_dev, base); if (IS_ERR(vc4)) return PTR_ERR(vc4); - vc4->is_vc5 = is_vc5; + vc4->gen = gen; vc4->dev = dev; drm = &vc4->base; platform_set_drvdata(pdev, drm); - if (!is_vc5) { + if (gen == VC4_GEN_4) { ret = drmm_mutex_init(drm, &vc4->bin_bo_lock); if (ret) goto err; @@ -335,7 +339,7 @@ static int vc4_drm_bind(struct device *dev) if (ret) goto err; - if (!is_vc5) { + if (gen == VC4_GEN_4) { ret = vc4_gem_init(drm); if (ret) goto err; diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 08e29fa82563..c6be1997f1c7 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -15,6 +15,7 @@ #include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_encoder.h> +#include <drm/drm_fourcc.h> #include <drm/drm_gem_dma_helper.h> #include <drm/drm_managed.h> #include <drm/drm_mm.h> @@ -80,11 +81,16 @@ struct vc4_perfmon { u64 counters[] __counted_by(ncounters); }; +enum vc4_gen { + VC4_GEN_4, + VC4_GEN_5, +}; + struct vc4_dev { struct drm_device base; struct device *dev; - bool is_vc5; + enum vc4_gen gen; unsigned int irq; @@ -315,6 +321,7 @@ struct vc4_hvs { struct platform_device *pdev; void __iomem *regs; u32 __iomem *dlist; + unsigned int dlist_mem_size; struct clk *core_clk; @@ -394,7 +401,7 @@ struct vc4_plane_state { */ u32 pos0_offset; u32 pos2_offset; - u32 ptr0_offset; + u32 ptr0_offset[DRM_FORMAT_MAX_PLANES]; u32 lbm_offset; /* Offset where the plane's dlist was last stored in the @@ -404,7 +411,7 @@ struct vc4_plane_state { /* Clipped coordinates of the plane on the display. */ int crtc_x, crtc_y, crtc_w, crtc_h; - /* Clipped area being scanned from in the FB. */ + /* Clipped area being scanned from in the FB in u16.16 format */ u32 src_x, src_y; u32 src_w[2], src_h[2]; @@ -414,11 +421,6 @@ struct vc4_plane_state { bool is_unity; bool is_yuv; - /* Offset to start scanning out from the start of the plane's - * BO. 
- */ - u32 offsets[3]; - /* Our allocation in LBM for temporary storage during scaling. */ struct drm_mm_node lbm; @@ -598,12 +600,7 @@ struct vc4_crtc_state { bool txp_armed; unsigned int assigned_channel; - struct { - unsigned int left; - unsigned int right; - unsigned int top; - unsigned int bottom; - } margins; + struct drm_connector_tv_margins margins; unsigned long hvs_load; @@ -1002,7 +999,9 @@ void vc4_irq_reset(struct drm_device *dev); /* vc4_hvs.c */ extern struct platform_driver vc4_hvs_driver; -struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev); +struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, + void __iomem *regs, + struct platform_device *pdev); void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int output); int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output); u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo); diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 24fb1b57e1dd..22bccd69eb62 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -76,7 +76,7 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, u32 i; int ret = 0; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) { @@ -389,7 +389,7 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns, unsigned long timeout_expire; DEFINE_WAIT(wait); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (vc4->finished_seqno >= seqno) @@ -474,7 +474,7 @@ vc4_submit_next_bin_job(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_exec_info *exec; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; again: @@ -522,7 +522,7 @@ vc4_submit_next_render_job(struct drm_device *dev) if (!exec) return; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; /* A previous RCL may have written to one of our textures, and @@ -543,7 +543,7 @@ vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec) struct vc4_dev *vc4 = to_vc4_dev(dev); bool was_empty = list_empty(&vc4->render_job_list); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; list_move_tail(&exec->head, &vc4->render_job_list); @@ -970,7 +970,7 @@ vc4_job_handle_completed(struct vc4_dev *vc4) unsigned long irqflags; struct vc4_seqno_cb *cb, *cb_temp; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; spin_lock_irqsave(&vc4->job_lock, irqflags); @@ -1009,7 +1009,7 @@ int vc4_queue_seqno_cb(struct drm_device *dev, struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned long irqflags; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; cb->func = func; @@ -1065,7 +1065,7 @@ vc4_wait_seqno_ioctl(struct drm_device *dev, void *data, struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_wait_seqno *args = data; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno, @@ -1082,7 +1082,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_gem_object *gem_obj; struct vc4_bo *bo; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (args->pad != 0) @@ -1131,7 +1131,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, args->shader_rec_size, args->bo_handle_count); - if 
(WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) { @@ -1267,7 +1267,7 @@ int vc4_gem_init(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; vc4->dma_fence_context = dma_fence_context_alloc(1); @@ -1326,7 +1326,7 @@ int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; switch (args->madv) { diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 6611ab7c26a6..62b82b1eeb36 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -147,6 +147,8 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) if (!drm_dev_enter(drm, &idx)) return -ENODEV; + WARN_ON(pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev)); + drm_print_regset32(&p, &vc4_hdmi->hdmi_regset); drm_print_regset32(&p, &vc4_hdmi->hd_regset); drm_print_regset32(&p, &vc4_hdmi->cec_regset); @@ -156,6 +158,8 @@ static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) drm_print_regset32(&p, &vc4_hdmi->ram_regset); drm_print_regset32(&p, &vc4_hdmi->rm_regset); + pm_runtime_put(&vc4_hdmi->pdev->dev); + drm_dev_exit(idx); return 0; @@ -1594,6 +1598,7 @@ static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, VC4_HD_VID_CTL_CLRRGB | VC4_HD_VID_CTL_UNDERFLOW_ENABLE | VC4_HD_VID_CTL_FRAME_COUNTER_RESET | + VC4_HD_VID_CTL_BLANK_INSERT_EN | (vsync_pos ? 0 : VC4_HD_VID_CTL_VSYNC_LOW) | (hsync_pos ? 0 : VC4_HD_VID_CTL_HSYNC_LOW)); @@ -1920,7 +1925,7 @@ static int vc4_hdmi_audio_startup(struct device *dev, void *data) } if (!vc4_hdmi_audio_can_stream(vc4_hdmi)) { - ret = -ENODEV; + ret = -ENOTSUPP; goto out_dev_exit; } @@ -2047,6 +2052,7 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data, struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); struct drm_device *drm = vc4_hdmi->connector.dev; struct drm_connector *connector = &vc4_hdmi->connector; + struct vc4_dev *vc4 = to_vc4_dev(drm); unsigned int sample_rate = params->sample_rate; unsigned int channels = params->channels; unsigned long flags; @@ -2104,11 +2110,18 @@ static int vc4_hdmi_audio_prepare(struct device *dev, void *data, VC4_HDMI_AUDIO_PACKET_CEA_MASK); /* Set the MAI threshold */ - HDMI_WRITE(HDMI_MAI_THR, - VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICHIGH) | - VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_PANICLOW) | - VC4_SET_FIELD(0x06, VC4_HD_MAI_THR_DREQHIGH) | - VC4_SET_FIELD(0x08, VC4_HD_MAI_THR_DREQLOW)); + if (vc4->gen >= VC4_GEN_5) + HDMI_WRITE(HDMI_MAI_THR, + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) | + VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) | + VC4_SET_FIELD(0x1c, VC4_HD_MAI_THR_DREQHIGH) | + VC4_SET_FIELD(0x1c, VC4_HD_MAI_THR_DREQLOW)); + else + HDMI_WRITE(HDMI_MAI_THR, + VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_PANICHIGH) | + VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_PANICLOW) | + VC4_SET_FIELD(0x6, VC4_HD_MAI_THR_DREQHIGH) | + VC4_SET_FIELD(0x8, VC4_HD_MAI_THR_DREQLOW)); HDMI_WRITE(HDMI_MAI_CONFIG, VC4_HDMI_MAI_CONFIG_BIT_REVERSE | diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h index b04b2fc8d831..68455ce513e7 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h @@ -498,8 +498,11 @@ static inline void vc4_hdmi_write(struct vc4_hdmi *hdmi, field = &variant->registers[reg]; base = __vc4_hdmi_get_field_base(hdmi, field->reg); 
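
The hunks above replace the boolean is_vc5 flag with the enum vc4_gen introduced in vc4_drv.h, so further SoC generations can be added without touching every call site again. A minimal sketch of the resulting check pattern follows; the helper function is hypothetical and only the enum values, the vc4->gen field and the WARN_ON_ONCE() guards are taken from the hunks above:

	/* Sketch only, not part of the patch: generation checks after the
	 * is_vc5 -> enum vc4_gen conversion.
	 */
	static inline bool vc4_is_gen4(const struct vc4_dev *vc4)
	{
		/* Legacy V3D/GEM paths only exist on the original VC4. */
		return vc4->gen == VC4_GEN_4;
	}

	static int vc4_example_legacy_ioctl(struct vc4_dev *vc4)
	{
		/* Mirrors the guards in the hunks above: reject the legacy
		 * UAPI on anything newer than GEN 4.
		 */
		if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
			return -ENODEV;

		return 0;
	}

Writing the guards as vc4->gen > VC4_GEN_4 (rather than == VC4_GEN_5) keeps them correct if later generations are added to the enum, while feature-specific paths such as the BCM2711 clock handling still compare against VC4_GEN_5 explicitly.
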
- if (!base) + if (!base) { + dev_warn(&hdmi->pdev->dev, + "Unknown register ID %u\n", reg); return; + } writel(value, base + field->offset); } diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 2a835a5cff9d..2a366a607fcc 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -33,7 +33,7 @@ #include "vc4_drv.h" #include "vc4_regs.h" -static const struct debugfs_reg32 hvs_regs[] = { +static const struct debugfs_reg32 vc4_hvs_regs[] = { VC4_REG32(SCALER_DISPCTRL), VC4_REG32(SCALER_DISPSTAT), VC4_REG32(SCALER_DISPID), @@ -110,7 +110,8 @@ static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_hvs *hvs = vc4->hvs; struct drm_printer p = drm_seq_file_printer(m); - unsigned int next_entry_start = 0; + unsigned int dlist_mem_size = hvs->dlist_mem_size; + unsigned int next_entry_start; unsigned int i, j; u32 dlist_word, dispstat; @@ -124,8 +125,9 @@ static int vc4_hvs_debugfs_dlist(struct seq_file *m, void *data) } drm_printf(&p, "HVS chan %u:\n", i); + next_entry_start = 0; - for (j = HVS_READ(SCALER_DISPLISTX(i)); j < 256; j++) { + for (j = HVS_READ(SCALER_DISPLISTX(i)); j < dlist_mem_size; j++) { dlist_word = readl((u32 __iomem *)vc4->hvs->dlist + j); drm_printf(&p, "dlist: %02d: 0x%08x\n", j, dlist_word); @@ -222,6 +224,9 @@ static void vc4_hvs_lut_load(struct vc4_hvs *hvs, if (!drm_dev_enter(drm, &idx)) return; + if (hvs->vc4->gen == VC4_GEN_4) + return; + /* The LUT memory is laid out with each HVS channel in order, * each of which takes 256 writes for R, 256 for G, then 256 * for B. @@ -291,53 +296,60 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output) u32 reg; int ret; - if (!vc4->is_vc5) + switch (vc4->gen) { + case VC4_GEN_4: return output; - /* - * NOTE: We should probably use drm_dev_enter()/drm_dev_exit() - * here, but this function is only used during the DRM device - * initialization, so we should be fine. - */ + case VC4_GEN_5: + /* + * NOTE: We should probably use + * drm_dev_enter()/drm_dev_exit() here, but this + * function is only used during the DRM device + * initialization, so we should be fine. 
+ */ - switch (output) { - case 0: - return 0; + switch (output) { + case 0: + return 0; - case 1: - return 1; + case 1: + return 1; - case 2: - reg = HVS_READ(SCALER_DISPECTRL); - ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg); - if (ret == 0) - return 2; + case 2: + reg = HVS_READ(SCALER_DISPECTRL); + ret = FIELD_GET(SCALER_DISPECTRL_DSP2_MUX_MASK, reg); + if (ret == 0) + return 2; - return 0; + return 0; - case 3: - reg = HVS_READ(SCALER_DISPCTRL); - ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg); - if (ret == 3) - return -EPIPE; + case 3: + reg = HVS_READ(SCALER_DISPCTRL); + ret = FIELD_GET(SCALER_DISPCTRL_DSP3_MUX_MASK, reg); + if (ret == 3) + return -EPIPE; - return ret; + return ret; - case 4: - reg = HVS_READ(SCALER_DISPEOLN); - ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg); - if (ret == 3) - return -EPIPE; + case 4: + reg = HVS_READ(SCALER_DISPEOLN); + ret = FIELD_GET(SCALER_DISPEOLN_DSP4_MUX_MASK, reg); + if (ret == 3) + return -EPIPE; - return ret; + return ret; - case 5: - reg = HVS_READ(SCALER_DISPDITHER); - ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg); - if (ret == 3) - return -EPIPE; + case 5: + reg = HVS_READ(SCALER_DISPDITHER); + ret = FIELD_GET(SCALER_DISPDITHER_DSP5_MUX_MASK, reg); + if (ret == 3) + return -EPIPE; - return ret; + return ret; + + default: + return -EPIPE; + } default: return -EPIPE; @@ -372,7 +384,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc, dispctrl = SCALER_DISPCTRLX_ENABLE; dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan)); - if (!vc4->is_vc5) { + if (vc4->gen == VC4_GEN_4) { dispctrl |= VC4_SET_FIELD(mode->hdisplay, SCALER_DISPCTRLX_WIDTH) | VC4_SET_FIELD(mode->vdisplay, @@ -394,7 +406,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc, dispbkgndx &= ~SCALER_DISPBKGND_INTERLACE; HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx | - ((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) | + ((vc4->gen == VC4_GEN_4) ? SCALER_DISPBKGND_GAMMA : 0) | (interlace ? SCALER_DISPBKGND_INTERLACE : 0)); /* Reload the LUT, since the SRAMs would have been disabled if @@ -415,13 +427,11 @@ void vc4_hvs_stop_channel(struct vc4_hvs *hvs, unsigned int chan) if (!drm_dev_enter(drm, &idx)) return; - if (HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE) + if (!(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_ENABLE)) goto out; - HVS_WRITE(SCALER_DISPCTRLX(chan), - HVS_READ(SCALER_DISPCTRLX(chan)) | SCALER_DISPCTRLX_RESET); - HVS_WRITE(SCALER_DISPCTRLX(chan), - HVS_READ(SCALER_DISPCTRLX(chan)) & ~SCALER_DISPCTRLX_ENABLE); + HVS_WRITE(SCALER_DISPCTRLX(chan), SCALER_DISPCTRLX_RESET); + HVS_WRITE(SCALER_DISPCTRLX(chan), 0); /* Once we leave, the scaler should be disabled and its fifo empty. */ WARN_ON_ONCE(HVS_READ(SCALER_DISPCTRLX(chan)) & SCALER_DISPCTRLX_RESET); @@ -456,17 +466,29 @@ int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) if (hweight32(crtc_state->connector_mask) > 1) return -EINVAL; - drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) - dlist_count += vc4_plane_dlist_size(plane_state); + drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { + u32 plane_dlist_count = vc4_plane_dlist_size(plane_state); + + drm_dbg_driver(dev, "[CRTC:%d:%s] Found [PLANE:%d:%s] with DLIST size: %u\n", + crtc->base.id, crtc->name, + plane->base.id, plane->name, + plane_dlist_count); + + dlist_count += plane_dlist_count; + } dlist_count++; /* Account for SCALER_CTL0_END. 
*/ + drm_dbg_driver(dev, "[CRTC:%d:%s] Allocating DLIST block with size: %u\n", + crtc->base.id, crtc->name, dlist_count); spin_lock_irqsave(&vc4->hvs->mm_lock, flags); ret = drm_mm_insert_node(&vc4->hvs->dlist_mm, &vc4_state->mm, dlist_count); spin_unlock_irqrestore(&vc4->hvs->mm_lock, flags); - if (ret) + if (ret) { + drm_err(dev, "Failed to allocate DLIST entry: %d\n", ret); return ret; + } return 0; } @@ -668,7 +690,8 @@ void vc4_hvs_atomic_flush(struct drm_crtc *crtc, void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel) { - struct drm_device *drm = &hvs->vc4->base; + struct vc4_dev *vc4 = hvs->vc4; + struct drm_device *drm = &vc4->base; u32 dispctrl; int idx; @@ -676,8 +699,9 @@ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel) return; dispctrl = HVS_READ(SCALER_DISPCTRL); - dispctrl &= ~(hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) : - SCALER_DISPCTRL_DSPEISLUR(channel)); + dispctrl &= ~((vc4->gen == VC4_GEN_5) ? + SCALER5_DISPCTRL_DSPEISLUR(channel) : + SCALER_DISPCTRL_DSPEISLUR(channel)); HVS_WRITE(SCALER_DISPCTRL, dispctrl); @@ -686,7 +710,8 @@ void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel) void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel) { - struct drm_device *drm = &hvs->vc4->base; + struct vc4_dev *vc4 = hvs->vc4; + struct drm_device *drm = &vc4->base; u32 dispctrl; int idx; @@ -694,8 +719,9 @@ void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel) return; dispctrl = HVS_READ(SCALER_DISPCTRL); - dispctrl |= (hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) : - SCALER_DISPCTRL_DSPEISLUR(channel)); + dispctrl |= ((vc4->gen == VC4_GEN_5) ? + SCALER5_DISPCTRL_DSPEISLUR(channel) : + SCALER_DISPCTRL_DSPEISLUR(channel)); HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_EUFLOW(channel)); @@ -738,8 +764,10 @@ static irqreturn_t vc4_hvs_irq_handler(int irq, void *data) control = HVS_READ(SCALER_DISPCTRL); for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) { - dspeislur = vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) : - SCALER_DISPCTRL_DSPEISLUR(channel); + dspeislur = (vc4->gen == VC4_GEN_5) ? + SCALER5_DISPCTRL_DSPEISLUR(channel) : + SCALER_DISPCTRL_DSPEISLUR(channel); + /* Interrupt masking is not always honored, so check it here. */ if (status & SCALER_DISPSTAT_EUFLOW(channel) && control & dspeislur) { @@ -767,7 +795,7 @@ int vc4_hvs_debugfs_init(struct drm_minor *minor) if (!vc4->hvs) return -ENODEV; - if (!vc4->is_vc5) + if (vc4->gen == VC4_GEN_4) debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR, minor->debugfs_root, &vc4->load_tracker_enabled); @@ -781,7 +809,9 @@ int vc4_hvs_debugfs_init(struct drm_minor *minor) return 0; } -struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pdev) +struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, + void __iomem *regs, + struct platform_device *pdev) { struct drm_device *drm = &vc4->base; struct vc4_hvs *hvs; @@ -791,6 +821,7 @@ struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pde return ERR_PTR(-ENOMEM); hvs->vc4 = vc4; + hvs->regs = regs; hvs->pdev = pdev; spin_lock_init(&hvs->mm_lock); @@ -800,16 +831,17 @@ struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pde * our 16K), since we don't want to scramble the screen when * transitioning from the firmware's boot setup to runtime. 
*/ + hvs->dlist_mem_size = (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END; drm_mm_init(&hvs->dlist_mm, HVS_BOOTLOADER_DLIST_END, - (SCALER_DLIST_SIZE >> 2) - HVS_BOOTLOADER_DLIST_END); + hvs->dlist_mem_size); /* Set up the HVS LBM memory manager. We could have some more * complicated data structure that allowed reuse of LBM areas * between planes when they don't overlap on the screen, but * for now we just allocate globally. */ - if (!vc4->is_vc5) + if (vc4->gen == VC4_GEN_4) /* 48k words of 2x12-bit pixels */ drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024); else @@ -821,79 +853,14 @@ struct vc4_hvs *__vc4_hvs_alloc(struct vc4_dev *vc4, struct platform_device *pde return hvs; } -static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) +static int vc4_hvs_hw_init(struct vc4_hvs *hvs) { - struct platform_device *pdev = to_platform_device(dev); - struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = to_vc4_dev(drm); - struct vc4_hvs *hvs = NULL; - int ret; - u32 dispctrl; - u32 reg, top; - - hvs = __vc4_hvs_alloc(vc4, NULL); - if (IS_ERR(hvs)) - return PTR_ERR(hvs); - - hvs->regs = vc4_ioremap_regs(pdev, 0); - if (IS_ERR(hvs->regs)) - return PTR_ERR(hvs->regs); - - hvs->regset.base = hvs->regs; - hvs->regset.regs = hvs_regs; - hvs->regset.nregs = ARRAY_SIZE(hvs_regs); - - if (vc4->is_vc5) { - struct rpi_firmware *firmware; - struct device_node *node; - unsigned int max_rate; - - node = rpi_firmware_find_node(); - if (!node) - return -EINVAL; - - firmware = rpi_firmware_get(node); - of_node_put(node); - if (!firmware) - return -EPROBE_DEFER; - - hvs->core_clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(hvs->core_clk)) { - dev_err(&pdev->dev, "Couldn't get core clock\n"); - return PTR_ERR(hvs->core_clk); - } - - max_rate = rpi_firmware_clk_get_max_rate(firmware, - RPI_FIRMWARE_CORE_CLK_ID); - rpi_firmware_put(firmware); - if (max_rate >= 550000000) - hvs->vc5_hdmi_enable_hdmi_20 = true; - - if (max_rate >= 600000000) - hvs->vc5_hdmi_enable_4096by2160 = true; - - hvs->max_core_rate = max_rate; - - ret = clk_prepare_enable(hvs->core_clk); - if (ret) { - dev_err(&pdev->dev, "Couldn't enable the core clock\n"); - return ret; - } - } - - if (!vc4->is_vc5) - hvs->dlist = hvs->regs + SCALER_DLIST_START; - else - hvs->dlist = hvs->regs + SCALER5_DLIST_START; + struct vc4_dev *vc4 = hvs->vc4; + u32 dispctrl, reg; - /* Upload filter kernels. We only have the one for now, so we - * keep it around for the lifetime of the driver. - */ - ret = vc4_hvs_upload_linear_kernel(hvs, - &hvs->mitchell_netravali_filter, - mitchell_netravali_1_3_1_3_kernel); - if (ret) - return ret; + dispctrl = HVS_READ(SCALER_DISPCTRL); + dispctrl |= SCALER_DISPCTRL_ENABLE; + HVS_WRITE(SCALER_DISPCTRL, dispctrl); reg = HVS_READ(SCALER_DISPECTRL); reg &= ~SCALER_DISPECTRL_DSP2_MUX_MASK; @@ -916,13 +883,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) reg | VC4_SET_FIELD(3, SCALER_DISPDITHER_DSP5_MUX)); dispctrl = HVS_READ(SCALER_DISPCTRL); - - dispctrl |= SCALER_DISPCTRL_ENABLE; dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) | SCALER_DISPCTRL_DISPEIRQ(1) | SCALER_DISPCTRL_DISPEIRQ(2); - if (!vc4->is_vc5) + if (vc4->gen == VC4_GEN_4) dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ | SCALER_DISPCTRL_SLVWREIRQ | SCALER_DISPCTRL_SLVRDEIRQ | @@ -962,11 +927,33 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1); dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2); + /* Set AXI panic mode. 
+ * VC4 panics when < 2 lines in FIFO. + * VC5 panics when less than 1 line in the FIFO. + */ + dispctrl &= ~(SCALER_DISPCTRL_PANIC0_MASK | + SCALER_DISPCTRL_PANIC1_MASK | + SCALER_DISPCTRL_PANIC2_MASK); + dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC0); + dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC1); + dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_PANIC2); + HVS_WRITE(SCALER_DISPCTRL, dispctrl); - /* Recompute Composite Output Buffer (COB) allocations for the displays + return 0; +} + +static int vc4_hvs_cob_init(struct vc4_hvs *hvs) +{ + struct vc4_dev *vc4 = hvs->vc4; + u32 reg, top; + + /* + * Recompute Composite Output Buffer (COB) allocations for the + * displays */ - if (!vc4->is_vc5) { + switch (vc4->gen) { + case VC4_GEN_4: /* The COB is 20736 pixels, or just over 10 lines at 2048 wide. * The bottom 2048 pixels are full 32bpp RGBA (intended for the * TXP composing RGBA to memory), whilst the remainder are only @@ -990,7 +977,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) top = VC4_COB_SIZE; reg |= (top - 1) << 16; HVS_WRITE(SCALER_DISPBASE0, reg); - } else { + break; + + case VC4_GEN_5: /* The COB is 44416 pixels, or 10.8 lines at 4096 wide. * The bottom 4096 pixels are full RGBA (intended for the TXP * composing RGBA to memory), whilst the remainder are only @@ -1016,8 +1005,96 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) top = VC5_COB_SIZE; reg |= top << 16; HVS_WRITE(SCALER_DISPBASE0, reg); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct drm_device *drm = dev_get_drvdata(master); + struct vc4_dev *vc4 = to_vc4_dev(drm); + struct vc4_hvs *hvs = NULL; + void __iomem *regs; + int ret; + + regs = vc4_ioremap_regs(pdev, 0); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + hvs = __vc4_hvs_alloc(vc4, regs, pdev); + if (IS_ERR(hvs)) + return PTR_ERR(hvs); + + hvs->regset.base = hvs->regs; + hvs->regset.regs = vc4_hvs_regs; + hvs->regset.nregs = ARRAY_SIZE(vc4_hvs_regs); + + if (vc4->gen == VC4_GEN_5) { + struct rpi_firmware *firmware; + struct device_node *node; + unsigned int max_rate; + + node = rpi_firmware_find_node(); + if (!node) + return -EINVAL; + + firmware = rpi_firmware_get(node); + of_node_put(node); + if (!firmware) + return -EPROBE_DEFER; + + hvs->core_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(hvs->core_clk)) { + dev_err(&pdev->dev, "Couldn't get core clock\n"); + return PTR_ERR(hvs->core_clk); + } + + max_rate = rpi_firmware_clk_get_max_rate(firmware, + RPI_FIRMWARE_CORE_CLK_ID); + rpi_firmware_put(firmware); + if (max_rate >= 550000000) + hvs->vc5_hdmi_enable_hdmi_20 = true; + + if (max_rate >= 600000000) + hvs->vc5_hdmi_enable_4096by2160 = true; + + hvs->max_core_rate = max_rate; + + ret = clk_prepare_enable(hvs->core_clk); + if (ret) { + dev_err(&pdev->dev, "Couldn't enable the core clock\n"); + return ret; + } } + if (vc4->gen == VC4_GEN_4) + hvs->dlist = hvs->regs + SCALER_DLIST_START; + else + hvs->dlist = hvs->regs + SCALER5_DLIST_START; + + ret = vc4_hvs_hw_init(hvs); + if (ret) + return ret; + + /* Upload filter kernels. We only have the one for now, so we + * keep it around for the lifetime of the driver. 
+ */ + ret = vc4_hvs_upload_linear_kernel(hvs, + &hvs->mitchell_netravali_filter, + mitchell_netravali_1_3_1_3_kernel); + if (ret) + return ret; + + ret = vc4_hvs_cob_init(hvs); + if (ret) + return ret; + ret = devm_request_irq(dev, platform_get_irq(pdev, 0), vc4_hvs_irq_handler, 0, "vc4 hvs", drm); if (ret) diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c index ef93d8e22a35..69b399f3b802 100644 --- a/drivers/gpu/drm/vc4/vc4_irq.c +++ b/drivers/gpu/drm/vc4/vc4_irq.c @@ -263,7 +263,7 @@ vc4_irq_enable(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (!vc4->v3d) @@ -280,7 +280,7 @@ vc4_irq_disable(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (!vc4->v3d) @@ -303,7 +303,7 @@ int vc4_irq_install(struct drm_device *dev, int irq) struct vc4_dev *vc4 = to_vc4_dev(dev); int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (irq == IRQ_NOTCONNECTED) @@ -324,7 +324,7 @@ void vc4_irq_uninstall(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; vc4_irq_disable(dev); @@ -337,7 +337,7 @@ void vc4_irq_reset(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned long irqflags; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; /* Acknowledge any stale IRQs. */ diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 5495f2a94fa9..58bbb9efc2df 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -369,7 +369,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) old_hvs_state->fifo_state[channel].pending_commit = NULL; } - if (vc4->is_vc5) { + if (vc4->gen == VC4_GEN_5) { unsigned long state_rate = max(old_hvs_state->core_clock_rate, new_hvs_state->core_clock_rate); unsigned long core_rate = clamp_t(unsigned long, state_rate, @@ -388,7 +388,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) vc4_ctm_commit(vc4, state); - if (vc4->is_vc5) + if (vc4->gen == VC4_GEN_5) vc5_hvs_pv_muxing_commit(vc4, state); else vc4_hvs_pv_muxing_commit(vc4, state); @@ -406,7 +406,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_cleanup_planes(dev, state); - if (vc4->is_vc5) { + if (vc4->gen == VC4_GEN_5) { unsigned long core_rate = min_t(unsigned long, hvs->max_core_rate, new_hvs_state->core_clock_rate); @@ -461,7 +461,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev, struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_mode_fb_cmd2 mode_cmd_local; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return ERR_PTR(-ENODEV); /* If the user didn't specify a modifier, use the @@ -1040,7 +1040,7 @@ int vc4_kms_load(struct drm_device *dev) * the BCM2711, but the load tracker computations are used for * the core clock rate calculation. */ - if (!vc4->is_vc5) { + if (vc4->gen == VC4_GEN_4) { /* Start with the load tracker enabled. Can be * disabled through the debugfs load_tracker file. 
*/ @@ -1056,7 +1056,7 @@ int vc4_kms_load(struct drm_device *dev) return ret; } - if (vc4->is_vc5) { + if (vc4->gen == VC4_GEN_5) { dev->mode_config.max_width = 7680; dev->mode_config.max_height = 7680; } else { @@ -1064,7 +1064,7 @@ int vc4_kms_load(struct drm_device *dev) dev->mode_config.max_height = 2048; } - dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs; + dev->mode_config.funcs = (vc4->gen > VC4_GEN_4) ? &vc5_mode_funcs : &vc4_mode_funcs; dev->mode_config.helper_private = &vc4_mode_config_helpers; dev->mode_config.preferred_depth = 24; dev->mode_config.async_page_flip = true; diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c index c4ac2c946238..4cd3643c3ba7 100644 --- a/drivers/gpu/drm/vc4/vc4_perfmon.c +++ b/drivers/gpu/drm/vc4/vc4_perfmon.c @@ -23,7 +23,7 @@ void vc4_perfmon_get(struct vc4_perfmon *perfmon) return; vc4 = perfmon->dev; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; refcount_inc(&perfmon->refcnt); @@ -37,7 +37,7 @@ void vc4_perfmon_put(struct vc4_perfmon *perfmon) return; vc4 = perfmon->dev; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (refcount_dec_and_test(&perfmon->refcnt)) @@ -49,7 +49,7 @@ void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon) unsigned int i; u32 mask; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon)) @@ -69,7 +69,7 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon, { unsigned int i; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; if (WARN_ON_ONCE(!vc4->active_perfmon || @@ -90,7 +90,7 @@ struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id) struct vc4_dev *vc4 = vc4file->dev; struct vc4_perfmon *perfmon; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return NULL; mutex_lock(&vc4file->perfmon.lock); @@ -105,7 +105,7 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file) { struct vc4_dev *vc4 = vc4file->dev; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; mutex_init(&vc4file->perfmon.lock); @@ -126,7 +126,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file) { struct vc4_dev *vc4 = vc4file->dev; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; mutex_lock(&vc4file->perfmon.lock); @@ -146,7 +146,7 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data, unsigned int i; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) { @@ -200,7 +200,7 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data, struct drm_vc4_perfmon_destroy *req = data; struct vc4_perfmon *perfmon; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) { @@ -228,7 +228,7 @@ int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data, struct vc4_perfmon *perfmon; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (!vc4->v3d) { diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 07caf2a47c6c..ba6e86d62a77 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -110,6 +110,18 @@ static const struct hvs_format { .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB, }, { + .drm = DRM_FORMAT_YUV444, + .hvs = 
HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE, + .pixel_order = HVS_PIXEL_ORDER_XYCBCR, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCBCR, + }, + { + .drm = DRM_FORMAT_YVU444, + .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV422_3PLANE, + .pixel_order = HVS_PIXEL_ORDER_XYCRCB, + .pixel_order_hvs5 = HVS_PIXEL_ORDER_XYCRCB, + }, + { .drm = DRM_FORMAT_YUV420, .hvs = HVS_PIXEL_FORMAT_YCBCR_YUV420_3PLANE, .pixel_order = HVS_PIXEL_ORDER_XYCBCR, @@ -251,9 +263,9 @@ static const struct hvs_format *vc4_get_hvs_format(u32 drm_format) static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst) { - if (dst == src) + if (dst == src >> 16) return VC4_SCALING_NONE; - if (3 * dst >= 2 * src) + if (3 * dst >= 2 * (src >> 16)) return VC4_SCALING_PPF; else return VC4_SCALING_TPZ; @@ -438,12 +450,11 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) { struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); struct drm_framebuffer *fb = state->fb; - struct drm_gem_dma_object *bo; int num_planes = fb->format->num_planes; struct drm_crtc_state *crtc_state; u32 h_subsample = fb->format->hsub; u32 v_subsample = fb->format->vsub; - int i, ret; + int ret; crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc); @@ -457,20 +468,10 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) if (ret) return ret; - for (i = 0; i < num_planes; i++) { - bo = drm_fb_dma_get_gem_obj(fb, i); - vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i]; - } - - /* - * We don't support subpixel source positioning for scaling, - * but fractional coordinates can be generated by clipping - * so just round for now - */ - vc4_state->src_x = DIV_ROUND_CLOSEST(state->src.x1, 1 << 16); - vc4_state->src_y = DIV_ROUND_CLOSEST(state->src.y1, 1 << 16); - vc4_state->src_w[0] = DIV_ROUND_CLOSEST(state->src.x2, 1 << 16) - vc4_state->src_x; - vc4_state->src_h[0] = DIV_ROUND_CLOSEST(state->src.y2, 1 << 16) - vc4_state->src_y; + vc4_state->src_x = state->src.x1; + vc4_state->src_y = state->src.y1; + vc4_state->src_w[0] = state->src.x2 - vc4_state->src_x; + vc4_state->src_h[0] = state->src.y2 - vc4_state->src_y; vc4_state->crtc_x = state->dst.x1; vc4_state->crtc_y = state->dst.y1; @@ -510,6 +511,12 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) */ if (vc4_state->x_scaling[1] == VC4_SCALING_NONE) vc4_state->x_scaling[1] = VC4_SCALING_PPF; + + /* Similarly UV needs vertical scaling to be enabled. + * Without this a 1:1 scaled YUV422 plane isn't rendered. + */ + if (vc4_state->y_scaling[1] == VC4_SCALING_NONE) + vc4_state->y_scaling[1] = VC4_SCALING_PPF; } else { vc4_state->is_yuv = false; vc4_state->x_scaling[1] = VC4_SCALING_NONE; @@ -523,7 +530,7 @@ static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst) { u32 scale, recip; - scale = (1 << 16) * src / dst; + scale = src / dst; /* The specs note that while the reciprocal would be defined * as (1<<32)/scale, ~0 is close enough. @@ -537,14 +544,61 @@ static void vc4_write_tpz(struct vc4_plane_state *vc4_state, u32 src, u32 dst) VC4_SET_FIELD(recip, SCALER_TPZ1_RECIP)); } -static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst) +/* phase magnitude bits */ +#define PHASE_BITS 6 + +static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst, + u32 xy, int channel) { - u32 scale = (1 << 16) * src / dst; + u32 scale = src / dst; + s32 offset, offset2; + s32 phase; + + /* + * Start the phase at 1/2 pixel from the 1st pixel at src_x. + * 1/4 pixel for YUV. 
+ */ + if (channel) { + /* + * The phase is relative to scale_src->x, so shift it for + * display list's x value + */ + offset = (xy & 0x1ffff) >> (16 - PHASE_BITS) >> 1; + offset += -(1 << PHASE_BITS >> 2); + } else { + /* + * The phase is relative to scale_src->x, so shift it for + * display list's x value + */ + offset = (xy & 0xffff) >> (16 - PHASE_BITS); + offset += -(1 << PHASE_BITS >> 1); + + /* + * This is a kludge to make sure the scaling factors are + * consistent with YUV's luma scaling. We lose 1-bit precision + * because of this. + */ + scale &= ~1; + } + + /* + * There may be a also small error introduced by precision of scale. + * Add half of that as a compromise + */ + offset2 = src - dst * scale; + offset2 >>= 16 - PHASE_BITS; + phase = offset + (offset2 >> 1); + + /* Ensure +ve values don't touch the sign bit, then truncate negative values */ + if (phase >= 1 << PHASE_BITS) + phase = (1 << PHASE_BITS) - 1; + + phase &= SCALER_PPF_IPHASE_MASK; vc4_dlist_write(vc4_state, SCALER_PPF_AGC | VC4_SET_FIELD(scale, SCALER_PPF_SCALE) | - VC4_SET_FIELD(0, SCALER_PPF_IPHASE)); + VC4_SET_FIELD(phase, SCALER_PPF_IPHASE)); } static u32 vc4_lbm_size(struct drm_plane_state *state) @@ -569,7 +623,7 @@ static u32 vc4_lbm_size(struct drm_plane_state *state) if (vc4_state->x_scaling[0] == VC4_SCALING_TPZ) pix_per_line = vc4_state->crtc_w; else - pix_per_line = vc4_state->src_w[0]; + pix_per_line = vc4_state->src_w[0] >> 16; if (!vc4_state->is_yuv) { if (vc4_state->y_scaling[0] == VC4_SCALING_TPZ) @@ -587,10 +641,10 @@ static u32 vc4_lbm_size(struct drm_plane_state *state) } /* Align it to 64 or 128 (hvs5) bytes */ - lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64); + lbm = roundup(lbm, vc4->gen == VC4_GEN_5 ? 128 : 64); /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */ - lbm /= vc4->is_vc5 ? 4 : 2; + lbm /= vc4->gen == VC4_GEN_5 ? 
4 : 2; return lbm; } @@ -602,27 +656,27 @@ static void vc4_write_scaling_parameters(struct drm_plane_state *state, /* Ch0 H-PPF Word 0: Scaling Parameters */ if (vc4_state->x_scaling[channel] == VC4_SCALING_PPF) { - vc4_write_ppf(vc4_state, - vc4_state->src_w[channel], vc4_state->crtc_w); + vc4_write_ppf(vc4_state, vc4_state->src_w[channel], + vc4_state->crtc_w, vc4_state->src_x, channel); } /* Ch0 V-PPF Words 0-1: Scaling Parameters, Context */ if (vc4_state->y_scaling[channel] == VC4_SCALING_PPF) { - vc4_write_ppf(vc4_state, - vc4_state->src_h[channel], vc4_state->crtc_h); + vc4_write_ppf(vc4_state, vc4_state->src_h[channel], + vc4_state->crtc_h, vc4_state->src_y, channel); vc4_dlist_write(vc4_state, 0xc0c0c0c0); } /* Ch0 H-TPZ Words 0-1: Scaling Parameters, Recip */ if (vc4_state->x_scaling[channel] == VC4_SCALING_TPZ) { - vc4_write_tpz(vc4_state, - vc4_state->src_w[channel], vc4_state->crtc_w); + vc4_write_tpz(vc4_state, vc4_state->src_w[channel], + vc4_state->crtc_w); } /* Ch0 V-TPZ Words 0-2: Scaling Parameters, Recip, Context */ if (vc4_state->y_scaling[channel] == VC4_SCALING_TPZ) { - vc4_write_tpz(vc4_state, - vc4_state->src_h[channel], vc4_state->crtc_h); + vc4_write_tpz(vc4_state, vc4_state->src_h[channel], + vc4_state->crtc_h); vc4_dlist_write(vc4_state, 0xc0c0c0c0); } } @@ -660,7 +714,8 @@ static void vc4_plane_calc_load(struct drm_plane_state *state) for (i = 0; i < fb->format->num_planes; i++) { /* Even if the bandwidth/plane required for a single frame is * - * vc4_state->src_w[i] * vc4_state->src_h[i] * cpp * vrefresh + * (vc4_state->src_w[i] >> 16) * (vc4_state->src_h[i] >> 16) * + * cpp * vrefresh * * when downscaling, we have to read more pixels per line in * the time frame reserved for a single line, so the bandwidth @@ -669,11 +724,11 @@ static void vc4_plane_calc_load(struct drm_plane_state *state) * load by this number. We're likely over-estimating the read * demand, but that's better than under-estimating it. */ - vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i], + vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i] >> 16, vc4_state->crtc_h); - vc4_state->membus_load += vc4_state->src_w[i] * - vc4_state->src_h[i] * vscale_factor * - fb->format->cpp[i]; + vc4_state->membus_load += (vc4_state->src_w[i] >> 16) * + (vc4_state->src_h[i] >> 16) * + vscale_factor * fb->format->cpp[i]; vc4_state->hvs_load += vc4_state->crtc_h * vc4_state->crtc_w; } @@ -684,7 +739,9 @@ static void vc4_plane_calc_load(struct drm_plane_state *state) static int vc4_plane_allocate_lbm(struct drm_plane_state *state) { - struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev); + struct drm_device *drm = state->plane->dev; + struct vc4_dev *vc4 = to_vc4_dev(drm); + struct drm_plane *plane = state->plane; struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); unsigned long irqflags; u32 lbm_size; @@ -693,6 +750,14 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state) if (!lbm_size) return 0; + if (vc4->gen == VC4_GEN_5) + lbm_size = ALIGN(lbm_size, 64); + else if (vc4->gen == VC4_GEN_4) + lbm_size = ALIGN(lbm_size, 32); + + drm_dbg_driver(drm, "[PLANE:%d:%s] LBM Allocation Size: %u\n", + plane->base.id, plane->name, lbm_size); + if (WARN_ON(!vc4_state->lbm_offset)) return -EINVAL; @@ -705,13 +770,14 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state) spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags); ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm, &vc4_state->lbm, - lbm_size, - vc4->is_vc5 ? 
64 : 32, + lbm_size, 1, 0, 0); spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags); - if (ret) + if (ret) { + drm_err(drm, "Failed to allocate LBM entry: %d\n", ret); return ret; + } } else { WARN_ON_ONCE(lbm_size != vc4_state->lbm.size); } @@ -826,9 +892,11 @@ static int vc4_plane_mode_set(struct drm_plane *plane, bool mix_plane_alpha; bool covers_screen; u32 scl0, scl1, pitch0; - u32 tiling, src_y; + u32 tiling, src_x, src_y; + u32 width, height; u32 hvs_format = format->hvs; unsigned int rotation; + u32 offsets[3] = { 0 }; int ret, i; if (vc4_state->dlist_initialized) @@ -838,6 +906,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane, if (ret) return ret; + width = vc4_state->src_w[0] >> 16; + height = vc4_state->src_h[0] >> 16; + /* SCL1 is used for Cb/Cr scaling of planar formats. For RGB * and 4:4:4, scl1 should be set to scl0 so both channels of * the scaler do the same thing. For YUV, the Y plane needs @@ -858,9 +929,11 @@ static int vc4_plane_mode_set(struct drm_plane *plane, DRM_MODE_REFLECT_Y); /* We must point to the last line when Y reflection is enabled. */ - src_y = vc4_state->src_y; + src_y = vc4_state->src_y >> 16; if (rotation & DRM_MODE_REFLECT_Y) - src_y += vc4_state->src_h[0] - 1; + src_y += height - 1; + + src_x = vc4_state->src_x >> 16; switch (base_format_mod) { case DRM_FORMAT_MOD_LINEAR: @@ -871,13 +944,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * out. */ for (i = 0; i < num_planes; i++) { - vc4_state->offsets[i] += src_y / - (i ? v_subsample : 1) * - fb->pitches[i]; - - vc4_state->offsets[i] += vc4_state->src_x / - (i ? h_subsample : 1) * - fb->format->cpp[i]; + offsets[i] += src_y / (i ? v_subsample : 1) * fb->pitches[i]; + offsets[i] += src_x / (i ? h_subsample : 1) * fb->format->cpp[i]; } break; @@ -898,7 +966,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * pitch * tile_h == tile_size * tiles_per_row */ u32 tiles_w = fb->pitches[0] >> (tile_size_shift - tile_h_shift); - u32 tiles_l = vc4_state->src_x >> tile_w_shift; + u32 tiles_l = src_x >> tile_w_shift; u32 tiles_r = tiles_w - tiles_l; u32 tiles_t = src_y >> tile_h_shift; /* Intra-tile offsets, which modify the base address (the @@ -908,7 +976,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, u32 tile_y = (src_y >> 4) & 1; u32 subtile_y = (src_y >> 2) & 3; u32 utile_y = src_y & 3; - u32 x_off = vc4_state->src_x & tile_w_mask; + u32 x_off = src_x & tile_w_mask; u32 y_off = src_y & tile_h_mask; /* When Y reflection is requested we must set the @@ -932,19 +1000,18 @@ static int vc4_plane_mode_set(struct drm_plane *plane, VC4_SET_FIELD(y_off, SCALER_PITCH0_TILE_Y_OFFSET) | VC4_SET_FIELD(tiles_l, SCALER_PITCH0_TILE_WIDTH_L) | VC4_SET_FIELD(tiles_r, SCALER_PITCH0_TILE_WIDTH_R)); - vc4_state->offsets[0] += tiles_t * (tiles_w << tile_size_shift); - vc4_state->offsets[0] += subtile_y << 8; - vc4_state->offsets[0] += utile_y << 4; + offsets[0] += tiles_t * (tiles_w << tile_size_shift); + offsets[0] += subtile_y << 8; + offsets[0] += utile_y << 4; /* Rows of tiles alternate left-to-right and right-to-left. 
*/ if (tiles_t & 1) { pitch0 |= SCALER_PITCH0_TILE_INITIAL_LINE_DIR; - vc4_state->offsets[0] += (tiles_w - tiles_l) << - tile_size_shift; - vc4_state->offsets[0] -= (1 + !tile_y) << 10; + offsets[0] += (tiles_w - tiles_l) << tile_size_shift; + offsets[0] -= (1 + !tile_y) << 10; } else { - vc4_state->offsets[0] += tiles_l << tile_size_shift; - vc4_state->offsets[0] += tile_y << 10; + offsets[0] += tiles_l << tile_size_shift; + offsets[0] += tile_y << 10; } break; @@ -1004,7 +1071,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * of the 12-pixels in that 128-bit word is the * first pixel to be used */ - u32 remaining_pixels = vc4_state->src_x % 96; + u32 remaining_pixels = src_x % 96; u32 aligned = remaining_pixels / 12; u32 last_bits = remaining_pixels % 12; @@ -1026,18 +1093,16 @@ static int vc4_plane_mode_set(struct drm_plane *plane, return -EINVAL; } pix_per_tile = tile_w / fb->format->cpp[0]; - x_off = (vc4_state->src_x % pix_per_tile) / + x_off = (src_x % pix_per_tile) / (i ? h_subsample : 1) * fb->format->cpp[i]; } - tile = vc4_state->src_x / pix_per_tile; + tile = src_x / pix_per_tile; - vc4_state->offsets[i] += param * tile_w * tile; - vc4_state->offsets[i] += src_y / - (i ? v_subsample : 1) * - tile_w; - vc4_state->offsets[i] += x_off & ~(i ? 1 : 0); + offsets[i] += param * tile_w * tile; + offsets[i] += src_y / (i ? v_subsample : 1) * tile_w; + offsets[i] += x_off & ~(i ? 1 : 0); } pitch0 = VC4_SET_FIELD(param, SCALER_TILE_HEIGHT); @@ -1050,6 +1115,30 @@ static int vc4_plane_mode_set(struct drm_plane *plane, return -EINVAL; } + /* fetch an extra pixel if we don't actually line up with the left edge. */ + if ((vc4_state->src_x & 0xffff) && vc4_state->src_x < (state->fb->width << 16)) + width++; + + /* same for the right side */ + if (((vc4_state->src_x + vc4_state->src_w[0]) & 0xffff) && + vc4_state->src_x + vc4_state->src_w[0] < (state->fb->width << 16)) + width++; + + /* now for the top */ + if ((vc4_state->src_y & 0xffff) && vc4_state->src_y < (state->fb->height << 16)) + height++; + + /* and the bottom */ + if (((vc4_state->src_y + vc4_state->src_h[0]) & 0xffff) && + vc4_state->src_y + vc4_state->src_h[0] < (state->fb->height << 16)) + height++; + + /* For YUV444 the hardware wants double the width, otherwise it doesn't + * fetch full width of chroma + */ + if (format->drm == DRM_FORMAT_YUV444 || format->drm == DRM_FORMAT_YVU444) + width <<= 1; + /* Don't waste cycles mixing with plane alpha if the set alpha * is opaque or there is no per-pixel alpha information. * In any case we use the alpha property value as the fixed alpha. @@ -1057,7 +1146,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE && fb->format->has_alpha; - if (!vc4->is_vc5) { + if (vc4->gen == VC4_GEN_4) { /* Control word */ vc4_dlist_write(vc4_state, SCALER_CTL0_VALID | @@ -1092,10 +1181,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane, vc4_dlist_write(vc4_state, (mix_plane_alpha ? SCALER_POS2_ALPHA_MIX : 0) | vc4_hvs4_get_alpha_blend_mode(state) | - VC4_SET_FIELD(vc4_state->src_w[0], - SCALER_POS2_WIDTH) | - VC4_SET_FIELD(vc4_state->src_h[0], - SCALER_POS2_HEIGHT)); + VC4_SET_FIELD(width, SCALER_POS2_WIDTH) | + VC4_SET_FIELD(height, SCALER_POS2_HEIGHT)); /* Position Word 3: Context. Written by the HVS. 
*/ vc4_dlist_write(vc4_state, 0xc0c0c0c0); @@ -1148,10 +1235,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane, /* Position Word 2: Source Image Size */ vc4_state->pos2_offset = vc4_state->dlist_count; vc4_dlist_write(vc4_state, - VC4_SET_FIELD(vc4_state->src_w[0], - SCALER5_POS2_WIDTH) | - VC4_SET_FIELD(vc4_state->src_h[0], - SCALER5_POS2_HEIGHT)); + VC4_SET_FIELD(width, SCALER5_POS2_WIDTH) | + VC4_SET_FIELD(height, SCALER5_POS2_HEIGHT)); /* Position Word 3: Context. Written by the HVS. */ vc4_dlist_write(vc4_state, 0xc0c0c0c0); @@ -1162,9 +1247,13 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * * The pointers may be any byte address. */ - vc4_state->ptr0_offset = vc4_state->dlist_count; - for (i = 0; i < num_planes; i++) - vc4_dlist_write(vc4_state, vc4_state->offsets[i]); + vc4_state->ptr0_offset[0] = vc4_state->dlist_count; + + for (i = 0; i < num_planes; i++) { + struct drm_gem_dma_object *bo = drm_fb_dma_get_gem_obj(fb, i); + + vc4_dlist_write(vc4_state, bo->dma_addr + fb->offsets[i] + offsets[i]); + } /* Pointer Context Word 0/1/2: Written by the HVS */ for (i = 0; i < num_planes; i++) @@ -1298,7 +1387,11 @@ static int vc4_plane_atomic_check(struct drm_plane *plane, if (ret) return ret; - return vc4_plane_allocate_lbm(new_plane_state); + ret = vc4_plane_allocate_lbm(new_plane_state); + if (ret) + return ret; + + return 0; } static void vc4_plane_atomic_update(struct drm_plane *plane, @@ -1362,13 +1455,13 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb) * scanout will start from this address as soon as the FIFO * needs to refill with pixels. */ - writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset]); + writel(addr, &vc4_state->hw_dlist[vc4_state->ptr0_offset[0]]); /* Also update the CPU-side dlist copy, so that any later * atomic updates that don't do a new modeset on our plane * also use our updated address. */ - vc4_state->dlist[vc4_state->ptr0_offset] = addr; + vc4_state->dlist[vc4_state->ptr0_offset[0]] = addr; drm_dev_exit(idx); } @@ -1423,8 +1516,6 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, sizeof(vc4_state->y_scaling)); vc4_state->is_unity = new_vc4_state->is_unity; vc4_state->is_yuv = new_vc4_state->is_yuv; - memcpy(vc4_state->offsets, new_vc4_state->offsets, - sizeof(vc4_state->offsets)); vc4_state->needs_bg_fill = new_vc4_state->needs_bg_fill; /* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. 
*/ @@ -1432,8 +1523,8 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, new_vc4_state->dlist[vc4_state->pos0_offset]; vc4_state->dlist[vc4_state->pos2_offset] = new_vc4_state->dlist[vc4_state->pos2_offset]; - vc4_state->dlist[vc4_state->ptr0_offset] = - new_vc4_state->dlist[vc4_state->ptr0_offset]; + vc4_state->dlist[vc4_state->ptr0_offset[0]] = + new_vc4_state->dlist[vc4_state->ptr0_offset[0]]; /* Note that we can't just call vc4_plane_write_dlist() * because that would smash the context data that the HVS is @@ -1443,8 +1534,8 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, &vc4_state->hw_dlist[vc4_state->pos0_offset]); writel(vc4_state->dlist[vc4_state->pos2_offset], &vc4_state->hw_dlist[vc4_state->pos2_offset]); - writel(vc4_state->dlist[vc4_state->ptr0_offset], - &vc4_state->hw_dlist[vc4_state->ptr0_offset]); + writel(vc4_state->dlist[vc4_state->ptr0_offset[0]], + &vc4_state->hw_dlist[vc4_state->ptr0_offset[0]]); drm_dev_exit(idx); } @@ -1471,7 +1562,7 @@ static int vc4_plane_atomic_async_check(struct drm_plane *plane, if (old_vc4_state->dlist_count != new_vc4_state->dlist_count || old_vc4_state->pos0_offset != new_vc4_state->pos0_offset || old_vc4_state->pos2_offset != new_vc4_state->pos2_offset || - old_vc4_state->ptr0_offset != new_vc4_state->ptr0_offset || + old_vc4_state->ptr0_offset[0] != new_vc4_state->ptr0_offset[0] || vc4_lbm_size(plane->state) != vc4_lbm_size(new_plane_state)) return -EINVAL; @@ -1481,7 +1572,7 @@ static int vc4_plane_atomic_async_check(struct drm_plane *plane, for (i = 0; i < new_vc4_state->dlist_count; i++) { if (i == new_vc4_state->pos0_offset || i == new_vc4_state->pos2_offset || - i == new_vc4_state->ptr0_offset || + i == new_vc4_state->ptr0_offset[0] || (new_vc4_state->lbm_offset && i == new_vc4_state->lbm_offset)) continue; @@ -1632,7 +1723,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, }; for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) { - if (!hvs_formats[i].hvs5_only || vc4->is_vc5) { + if (!hvs_formats[i].hvs5_only || vc4->gen == VC4_GEN_5) { formats[num_formats] = hvs_formats[i].drm; num_formats++; } @@ -1647,7 +1738,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, return ERR_CAST(vc4_plane); plane = &vc4_plane->base; - if (vc4->is_vc5) + if (vc4->gen == VC4_GEN_5) drm_plane_helper_add(plane, &vc5_plane_helper_funcs); else drm_plane_helper_add(plane, &vc4_plane_helper_funcs); diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 8ac9515554f8..c55dec383929 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h @@ -777,6 +777,7 @@ enum { # define VC4_HD_VID_CTL_CLRSYNC BIT(24) # define VC4_HD_VID_CTL_CLRRGB BIT(23) # define VC4_HD_VID_CTL_BLANKPIX BIT(18) +# define VC4_HD_VID_CTL_BLANK_INSERT_EN BIT(16) # define VC4_HD_CSC_CTL_ORDER_MASK VC4_MASK(7, 5) # define VC4_HD_CSC_CTL_ORDER_SHIFT 5 diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c index 1bda5010f15a..14079853338e 100644 --- a/drivers/gpu/drm/vc4/vc4_render_cl.c +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c @@ -599,7 +599,7 @@ int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) bool has_bin = args->bin_cl_size != 0; int ret; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; if (args->min_x_tile > args->max_x_tile || diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c index bf5c4e36c94e..2423826c89eb 100644 --- a/drivers/gpu/drm/vc4/vc4_v3d.c +++ 
b/drivers/gpu/drm/vc4/vc4_v3d.c @@ -127,7 +127,7 @@ static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused) int vc4_v3d_pm_get(struct vc4_dev *vc4) { - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; mutex_lock(&vc4->power_lock); @@ -148,7 +148,7 @@ vc4_v3d_pm_get(struct vc4_dev *vc4) void vc4_v3d_pm_put(struct vc4_dev *vc4) { - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; mutex_lock(&vc4->power_lock); @@ -178,7 +178,7 @@ int vc4_v3d_get_bin_slot(struct vc4_dev *vc4) uint64_t seqno = 0; struct vc4_exec_info *exec; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; try_again: @@ -325,7 +325,7 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used) { int ret = 0; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; mutex_lock(&vc4->bin_bo_lock); @@ -360,7 +360,7 @@ static void bin_bo_release(struct kref *ref) void vc4_v3d_bin_bo_put(struct vc4_dev *vc4) { - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return; mutex_lock(&vc4->bin_bo_lock); diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c index 0c17284bf6f5..5bf134968ade 100644 --- a/drivers/gpu/drm/vc4/vc4_validate.c +++ b/drivers/gpu/drm/vc4/vc4_validate.c @@ -109,7 +109,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex) struct drm_gem_dma_object *obj; struct vc4_bo *bo; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return NULL; if (hindex >= exec->bo_count) { @@ -169,7 +169,7 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_dma_object *fbo, uint32_t utile_w = utile_width(cpp); uint32_t utile_h = utile_height(cpp); - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return false; /* The shaded vertex format stores signed 12.4 fixed point @@ -495,7 +495,7 @@ vc4_validate_bin_cl(struct drm_device *dev, uint32_t dst_offset = 0; uint32_t src_offset = 0; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; while (src_offset < len) { @@ -942,7 +942,7 @@ vc4_validate_shader_recs(struct drm_device *dev, uint32_t i; int ret = 0; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV; for (i = 0; i < exec->shader_state_count; i++) { diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index 9745f8810eca..2d74e786914c 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c @@ -786,7 +786,7 @@ vc4_validate_shader(struct drm_gem_dma_object *shader_obj) struct vc4_validated_shader_info *validated_shader = NULL; struct vc4_shader_validation_state validation_state; - if (WARN_ON_ONCE(vc4->is_vc5)) + if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return NULL; memset(&validation_state, 0, sizeof(validation_state)); diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index 7bbe46a98ff1..ebd0879e04d4 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -14,6 +14,7 @@ config DRM_XE select DRM_PANEL select DRM_SUBALLOC_HELPER select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_DSC_HELPER select DRM_DISPLAY_HDCP_HELPER select DRM_DISPLAY_HDMI_HELPER select DRM_DISPLAY_HELPER diff --git a/drivers/gpu/host1x/context_bus.c b/drivers/gpu/host1x/context_bus.c index d9421179d7b4..7cd0e1a5edd1 100644 --- a/drivers/gpu/host1x/context_bus.c +++ 
b/drivers/gpu/host1x/context_bus.c @@ -6,7 +6,7 @@ #include <linux/device.h> #include <linux/of.h> -struct bus_type host1x_context_device_bus_type = { +const struct bus_type host1x_context_device_bus_type = { .name = "host1x-context", }; EXPORT_SYMBOL_GPL(host1x_context_device_bus_type); diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index b62e4f0e8130..bc3cda4625d0 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -142,18 +142,29 @@ static const struct host1x_info host1x05_info = { }; static const struct host1x_sid_entry tegra186_sid_table[] = { - { - /* VIC */ - .base = 0x1af0, - .offset = 0x30, - .limit = 0x34 - }, - { - /* NVDEC */ - .base = 0x1b00, - .offset = 0x30, - .limit = 0x34 - }, + { /* SE1 */ .base = 0x1ac8, .offset = 0x90, .limit = 0x90 }, + { /* SE2 */ .base = 0x1ad0, .offset = 0x90, .limit = 0x90 }, + { /* SE3 */ .base = 0x1ad8, .offset = 0x90, .limit = 0x90 }, + { /* SE4 */ .base = 0x1ae0, .offset = 0x90, .limit = 0x90 }, + { /* ISP */ .base = 0x1ae8, .offset = 0x50, .limit = 0x50 }, + { /* VIC */ .base = 0x1af0, .offset = 0x30, .limit = 0x34 }, + { /* NVENC */ .base = 0x1af8, .offset = 0x30, .limit = 0x34 }, + { /* NVDEC */ .base = 0x1b00, .offset = 0x30, .limit = 0x34 }, + { /* NVJPG */ .base = 0x1b08, .offset = 0x30, .limit = 0x34 }, + { /* TSEC */ .base = 0x1b10, .offset = 0x30, .limit = 0x34 }, + { /* TSECB */ .base = 0x1b18, .offset = 0x30, .limit = 0x34 }, + { /* VI 0 */ .base = 0x1b80, .offset = 0x10000, .limit = 0x10000 }, + { /* VI 1 */ .base = 0x1b88, .offset = 0x20000, .limit = 0x20000 }, + { /* VI 2 */ .base = 0x1b90, .offset = 0x30000, .limit = 0x30000 }, + { /* VI 3 */ .base = 0x1b98, .offset = 0x40000, .limit = 0x40000 }, + { /* VI 4 */ .base = 0x1ba0, .offset = 0x50000, .limit = 0x50000 }, + { /* VI 5 */ .base = 0x1ba8, .offset = 0x60000, .limit = 0x60000 }, + { /* VI 6 */ .base = 0x1bb0, .offset = 0x70000, .limit = 0x70000 }, + { /* VI 7 */ .base = 0x1bb8, .offset = 0x80000, .limit = 0x80000 }, + { /* VI 8 */ .base = 0x1bc0, .offset = 0x90000, .limit = 0x90000 }, + { /* VI 9 */ .base = 0x1bc8, .offset = 0xa0000, .limit = 0xa0000 }, + { /* VI 10 */ .base = 0x1bd0, .offset = 0xb0000, .limit = 0xb0000 }, + { /* VI 11 */ .base = 0x1bd8, .offset = 0xc0000, .limit = 0xc0000 }, }; static const struct host1x_info host1x06_info = { @@ -173,24 +184,26 @@ static const struct host1x_info host1x06_info = { }; static const struct host1x_sid_entry tegra194_sid_table[] = { - { - /* VIC */ - .base = 0x1af0, - .offset = 0x30, - .limit = 0x34 - }, - { - /* NVDEC */ - .base = 0x1b00, - .offset = 0x30, - .limit = 0x34 - }, - { - /* NVDEC1 */ - .base = 0x1bc0, - .offset = 0x30, - .limit = 0x34 - }, + { /* SE1 */ .base = 0x1ac8, .offset = 0x90, .limit = 0x90 }, + { /* SE2 */ .base = 0x1ad0, .offset = 0x90, .limit = 0x90 }, + { /* SE3 */ .base = 0x1ad8, .offset = 0x90, .limit = 0x90 }, + { /* SE4 */ .base = 0x1ae0, .offset = 0x90, .limit = 0x90 }, + { /* ISP */ .base = 0x1ae8, .offset = 0x800, .limit = 0x800 }, + { /* VIC */ .base = 0x1af0, .offset = 0x30, .limit = 0x34 }, + { /* NVENC */ .base = 0x1af8, .offset = 0x30, .limit = 0x34 }, + { /* NVDEC */ .base = 0x1b00, .offset = 0x30, .limit = 0x34 }, + { /* NVJPG */ .base = 0x1b08, .offset = 0x30, .limit = 0x34 }, + { /* TSEC */ .base = 0x1b10, .offset = 0x30, .limit = 0x34 }, + { /* TSECB */ .base = 0x1b18, .offset = 0x30, .limit = 0x34 }, + { /* VI */ .base = 0x1b80, .offset = 0x800, .limit = 0x800 }, + { /* VI_THI */ .base = 0x1b88, .offset = 0x30, .limit = 0x34 }, + { /* ISP_THI */ 
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index b62e4f0e8130..bc3cda4625d0 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -142,18 +142,29 @@ static const struct host1x_info host1x05_info = {
 };
 
 static const struct host1x_sid_entry tegra186_sid_table[] = {
-	{
-		/* VIC */
-		.base = 0x1af0,
-		.offset = 0x30,
-		.limit = 0x34
-	},
-	{
-		/* NVDEC */
-		.base = 0x1b00,
-		.offset = 0x30,
-		.limit = 0x34
-	},
+	{ /* SE1 */ .base = 0x1ac8, .offset = 0x90, .limit = 0x90 },
+	{ /* SE2 */ .base = 0x1ad0, .offset = 0x90, .limit = 0x90 },
+	{ /* SE3 */ .base = 0x1ad8, .offset = 0x90, .limit = 0x90 },
+	{ /* SE4 */ .base = 0x1ae0, .offset = 0x90, .limit = 0x90 },
+	{ /* ISP */ .base = 0x1ae8, .offset = 0x50, .limit = 0x50 },
+	{ /* VIC */ .base = 0x1af0, .offset = 0x30, .limit = 0x34 },
+	{ /* NVENC */ .base = 0x1af8, .offset = 0x30, .limit = 0x34 },
+	{ /* NVDEC */ .base = 0x1b00, .offset = 0x30, .limit = 0x34 },
+	{ /* NVJPG */ .base = 0x1b08, .offset = 0x30, .limit = 0x34 },
+	{ /* TSEC */ .base = 0x1b10, .offset = 0x30, .limit = 0x34 },
+	{ /* TSECB */ .base = 0x1b18, .offset = 0x30, .limit = 0x34 },
+	{ /* VI 0 */ .base = 0x1b80, .offset = 0x10000, .limit = 0x10000 },
+	{ /* VI 1 */ .base = 0x1b88, .offset = 0x20000, .limit = 0x20000 },
+	{ /* VI 2 */ .base = 0x1b90, .offset = 0x30000, .limit = 0x30000 },
+	{ /* VI 3 */ .base = 0x1b98, .offset = 0x40000, .limit = 0x40000 },
+	{ /* VI 4 */ .base = 0x1ba0, .offset = 0x50000, .limit = 0x50000 },
+	{ /* VI 5 */ .base = 0x1ba8, .offset = 0x60000, .limit = 0x60000 },
+	{ /* VI 6 */ .base = 0x1bb0, .offset = 0x70000, .limit = 0x70000 },
+	{ /* VI 7 */ .base = 0x1bb8, .offset = 0x80000, .limit = 0x80000 },
+	{ /* VI 8 */ .base = 0x1bc0, .offset = 0x90000, .limit = 0x90000 },
+	{ /* VI 9 */ .base = 0x1bc8, .offset = 0xa0000, .limit = 0xa0000 },
+	{ /* VI 10 */ .base = 0x1bd0, .offset = 0xb0000, .limit = 0xb0000 },
+	{ /* VI 11 */ .base = 0x1bd8, .offset = 0xc0000, .limit = 0xc0000 },
 };
 
 static const struct host1x_info host1x06_info = {
@@ -173,24 +184,26 @@ static const struct host1x_info host1x06_info = {
 };
 
 static const struct host1x_sid_entry tegra194_sid_table[] = {
-	{
-		/* VIC */
-		.base = 0x1af0,
-		.offset = 0x30,
-		.limit = 0x34
-	},
-	{
-		/* NVDEC */
-		.base = 0x1b00,
-		.offset = 0x30,
-		.limit = 0x34
-	},
-	{
-		/* NVDEC1 */
-		.base = 0x1bc0,
-		.offset = 0x30,
-		.limit = 0x34
-	},
+	{ /* SE1 */ .base = 0x1ac8, .offset = 0x90, .limit = 0x90 },
+	{ /* SE2 */ .base = 0x1ad0, .offset = 0x90, .limit = 0x90 },
+	{ /* SE3 */ .base = 0x1ad8, .offset = 0x90, .limit = 0x90 },
+	{ /* SE4 */ .base = 0x1ae0, .offset = 0x90, .limit = 0x90 },
+	{ /* ISP */ .base = 0x1ae8, .offset = 0x800, .limit = 0x800 },
+	{ /* VIC */ .base = 0x1af0, .offset = 0x30, .limit = 0x34 },
+	{ /* NVENC */ .base = 0x1af8, .offset = 0x30, .limit = 0x34 },
+	{ /* NVDEC */ .base = 0x1b00, .offset = 0x30, .limit = 0x34 },
+	{ /* NVJPG */ .base = 0x1b08, .offset = 0x30, .limit = 0x34 },
+	{ /* TSEC */ .base = 0x1b10, .offset = 0x30, .limit = 0x34 },
+	{ /* TSECB */ .base = 0x1b18, .offset = 0x30, .limit = 0x34 },
+	{ /* VI */ .base = 0x1b80, .offset = 0x800, .limit = 0x800 },
+	{ /* VI_THI */ .base = 0x1b88, .offset = 0x30, .limit = 0x34 },
+	{ /* ISP_THI */ .base = 0x1b90, .offset = 0x30, .limit = 0x34 },
+	{ /* PVA0_CLUSTER */ .base = 0x1b98, .offset = 0x0, .limit = 0x0 },
+	{ /* PVA0_CLUSTER */ .base = 0x1ba0, .offset = 0x0, .limit = 0x0 },
+	{ /* NVDLA0 */ .base = 0x1ba8, .offset = 0x30, .limit = 0x34 },
+	{ /* NVDLA1 */ .base = 0x1bb0, .offset = 0x30, .limit = 0x34 },
+	{ /* NVENC1 */ .base = 0x1bb8, .offset = 0x30, .limit = 0x34 },
+	{ /* NVDEC1 */ .base = 0x1bc0, .offset = 0x30, .limit = 0x34 },
 };
 
 static const struct host1x_info host1x07_info = {
@@ -215,54 +228,35 @@
  * and firmware stream ID in the MMIO path table.
  */
 static const struct host1x_sid_entry tegra234_sid_table[] = {
-	{
-		/* SE2 MMIO */
-		.base = 0x1658,
-		.offset = 0x90,
-		.limit = 0x90
-	},
-	{
-		/* SE4 MMIO */
-		.base = 0x1660,
-		.offset = 0x90,
-		.limit = 0x90
-	},
-	{
-		/* SE2 channel */
-		.base = 0x1738,
-		.offset = 0x90,
-		.limit = 0x90
-	},
-	{
-		/* SE4 channel */
-		.base = 0x1740,
-		.offset = 0x90,
-		.limit = 0x90
-	},
-	{
-		/* VIC channel */
-		.base = 0x17b8,
-		.offset = 0x30,
-		.limit = 0x30
-	},
-	{
-		/* VIC MMIO */
-		.base = 0x1688,
-		.offset = 0x34,
-		.limit = 0x34
-	},
-	{
-		/* NVDEC channel */
-		.base = 0x17c8,
-		.offset = 0x30,
-		.limit = 0x30,
-	},
-	{
-		/* NVDEC MMIO */
-		.base = 0x1698,
-		.offset = 0x34,
-		.limit = 0x34,
-	},
+	{ /* SE1 MMIO */ .base = 0x1650, .offset = 0x90, .limit = 0x90 },
+	{ /* SE1 ch */ .base = 0x1730, .offset = 0x90, .limit = 0x90 },
+	{ /* SE2 MMIO */ .base = 0x1658, .offset = 0x90, .limit = 0x90 },
+	{ /* SE2 ch */ .base = 0x1738, .offset = 0x90, .limit = 0x90 },
+	{ /* SE4 MMIO */ .base = 0x1660, .offset = 0x90, .limit = 0x90 },
+	{ /* SE4 ch */ .base = 0x1740, .offset = 0x90, .limit = 0x90 },
+	{ /* ISP MMIO */ .base = 0x1680, .offset = 0x800, .limit = 0x800 },
+	{ /* VIC MMIO */ .base = 0x1688, .offset = 0x34, .limit = 0x34 },
+	{ /* VIC ch */ .base = 0x17b8, .offset = 0x30, .limit = 0x30 },
+	{ /* NVENC MMIO */ .base = 0x1690, .offset = 0x34, .limit = 0x34 },
+	{ /* NVENC ch */ .base = 0x17c0, .offset = 0x30, .limit = 0x30 },
+	{ /* NVDEC MMIO */ .base = 0x1698, .offset = 0x34, .limit = 0x34 },
+	{ /* NVDEC ch */ .base = 0x17c8, .offset = 0x30, .limit = 0x30 },
+	{ /* NVJPG MMIO */ .base = 0x16a0, .offset = 0x34, .limit = 0x34 },
+	{ /* NVJPG ch */ .base = 0x17d0, .offset = 0x30, .limit = 0x30 },
+	{ /* TSEC MMIO */ .base = 0x16a8, .offset = 0x30, .limit = 0x34 },
+	{ /* NVJPG1 MMIO */ .base = 0x16b0, .offset = 0x34, .limit = 0x34 },
+	{ /* NVJPG1 ch */ .base = 0x17a8, .offset = 0x30, .limit = 0x30 },
+	{ /* VI MMIO */ .base = 0x16b8, .offset = 0x800, .limit = 0x800 },
+	{ /* VI_THI MMIO */ .base = 0x16c0, .offset = 0x30, .limit = 0x34 },
+	{ /* ISP_THI MMIO */ .base = 0x16c8, .offset = 0x30, .limit = 0x34 },
+	{ /* NVDLA MMIO */ .base = 0x16d8, .offset = 0x30, .limit = 0x34 },
+	{ /* NVDLA ch */ .base = 0x17e0, .offset = 0x30, .limit = 0x34 },
+	{ /* NVDLA1 MMIO */ .base = 0x16e0, .offset = 0x30, .limit = 0x34 },
+	{ /* NVDLA1 ch */ .base = 0x17e8, .offset = 0x30, .limit = 0x34 },
+	{ /* OFA MMIO */ .base = 0x16e8, .offset = 0x34, .limit = 0x34 },
+	{ /* OFA ch */ .base = 0x1768, .offset = 0x30, .limit = 0x30 },
+	{ /* VI2 MMIO */ .base = 0x16f0, .offset = 0x800, .limit = 0x800 },
+	{ /* VI2_THI MMIO */ .base = 0x16f8, .offset = 0x30, .limit = 0x34 },
 };
 
 static const struct host1x_info host1x08_info = {
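Each stream ID table entry above names a register pair in the hypervisor aperture (base) plus the client-side offset and limit values that get written into it at boot. A rough sketch of how such a table could be walked; the write helper, the +4 spacing of the limit register and all names are assumptions for illustration, not the in-tree code:

#include <stddef.h>
#include <stdint.h>

struct sid_entry_sketch {
	uint32_t base;   /* register pair in the hypervisor aperture */
	uint32_t offset; /* where the client exposes its stream ID register */
	uint32_t limit;  /* last programmable stream ID register */
};

static void hypervisor_writel_sketch(void *regs, uint32_t value, uint32_t reg)
{
	/* Note the value-then-register order, matching the host1x_*_writel()
	 * prototypes as changed in dev.h below. */
	*(volatile uint32_t *)((char *)regs + reg) = value;
}

static void setup_sid_table_sketch(void *regs, const struct sid_entry_sketch *table,
				   size_t num_entries)
{
	size_t i;

	for (i = 0; i < num_entries; i++) {
		hypervisor_writel_sketch(regs, table[i].offset, table[i].base);
		hypervisor_writel_sketch(regs, table[i].limit, table[i].base + 4);
	}
}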
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 92031b240a17..d3855a1c6b47 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -175,11 +175,11 @@ struct host1x {
 };
 
 void host1x_common_writel(struct host1x *host1x, u32 v, u32 r);
-void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v);
+void host1x_hypervisor_writel(struct host1x *host1x, u32 v, u32 r);
 u32 host1x_hypervisor_readl(struct host1x *host1x, u32 r);
-void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v);
+void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r);
 u32 host1x_sync_readl(struct host1x *host1x, u32 r);
-void host1x_ch_writel(struct host1x_channel *ch, u32 r, u32 v);
+void host1x_ch_writel(struct host1x_channel *ch, u32 v, u32 r);
 u32 host1x_ch_readl(struct host1x_channel *ch, u32 r);
 
 static inline void host1x_hw_syncpt_restore(struct host1x *host,
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 1b65a10b9dfc..3f3f0018eee0 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -254,12 +254,24 @@ static void timeout_release_mlock(struct host1x_cdma *cdma)
 	u32 offset;
 
 	switch (ch->client->class) {
+	case HOST1X_CLASS_NVJPG1:
+		offset = HOST1X_COMMON_NVJPG1_MLOCK;
+		break;
+	case HOST1X_CLASS_NVENC:
+		offset = HOST1X_COMMON_NVENC_MLOCK;
+		break;
 	case HOST1X_CLASS_VIC:
 		offset = HOST1X_COMMON_VIC_MLOCK;
 		break;
+	case HOST1X_CLASS_NVJPG:
+		offset = HOST1X_COMMON_NVJPG_MLOCK;
+		break;
 	case HOST1X_CLASS_NVDEC:
 		offset = HOST1X_COMMON_NVDEC_MLOCK;
 		break;
+	case HOST1X_CLASS_OFA:
+		offset = HOST1X_COMMON_OFA_MLOCK;
+		break;
 	default:
 		WARN(1, "%s was not updated for class %u", __func__, ch->client->class);
 		return;
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index 54e31d81517b..4c32aa1b95e8 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -177,7 +177,16 @@ static void show_gather(struct output *o, dma_addr_t phys_addr,
 
 	for (i = 0; i < words; i++) {
 		dma_addr_t addr = phys_addr + i * 4;
-		u32 val = *(map_addr + offset / 4 + i);
+		u32 voffset = offset + i * 4;
+		u32 val;
+
+		/* If we reach the RESTART opcode, continue at the beginning of pushbuffer */
+		if (cdma && voffset >= cdma->push_buffer.size) {
+			addr -= cdma->push_buffer.size;
+			voffset -= cdma->push_buffer.size;
+		}
+
+		val = *(map_addr + voffset / 4);
 
 		if (!data_count) {
 			host1x_debug_output(o, " %pad: %08x: ", &addr, val);
@@ -203,7 +212,7 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
 				    job->num_slots, job->num_unpins);
 
 		show_gather(o, pb->dma + job->first_get, job->num_slots * 2, cdma,
-			    pb->dma + job->first_get, pb->mapped + job->first_get);
+			    pb->dma, pb->mapped);
 
 		for (i = 0; i < job->num_cmds; i++) {
 			struct host1x_job_gather *g;
@@ -227,7 +236,7 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
 			host1x_debug_output(o, " GATHER at %pad+%#x, %d words\n",
 					    &g->base, g->offset, g->words);
 
-			show_gather(o, g->base + g->offset, g->words, cdma,
+			show_gather(o, g->base + g->offset, g->words, NULL,
 				    g->base, mapped);
 
 			if (!job->gather_copy_mapped)
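The debug_hw.c hunks above make the dumper follow the circular push buffer past the RESTART opcode instead of reading beyond the mapping. A small sketch of the wrap logic under assumed names:

#include <stdint.h>

struct pushbuf_sketch {
	uint64_t dma;      /* bus address of the buffer start */
	uint32_t *mapped;  /* CPU mapping of the buffer */
	uint32_t size;     /* size in bytes */
};

static uint32_t pushbuf_read_word(const struct pushbuf_sketch *pb,
				  uint32_t offset, uint64_t *addr_out)
{
	uint64_t addr = pb->dma + offset;

	/* Past the end of the ring, the stream continues at the beginning,
	 * so rewind both the printed bus address and the CPU-side offset. */
	if (offset >= pb->size) {
		offset -= pb->size;
		addr -= pb->size;
	}

	if (addr_out)
		*addr_out = addr;

	return pb->mapped[offset / 4];
}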
diff --git a/include/drm/bridge/imx.h b/include/drm/bridge/imx.h
new file mode 100644
index 000000000000..e14f429a9ca2
--- /dev/null
+++ b/include/drm/bridge/imx.h
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ */
+
+#ifndef DRM_IMX_BRIDGE_H
+#define DRM_IMX_BRIDGE_H
+
+struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev,
+					      struct device_node *np,
+					      int type);
+
+#endif
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 9a73f786f4ad..00830b49a3ff 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -17,7 +17,6 @@
 struct drm_mode_create_dumb;
 struct drm_plane;
 struct drm_plane_state;
-struct drm_simple_display_pipe;
 struct filp;
 struct vm_area_struct;
 
@@ -137,18 +136,6 @@ drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
	.prepare_fb = drm_gem_vram_plane_helper_prepare_fb, \
	.cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb
 
-/*
- * Helpers for struct drm_simple_display_pipe_funcs
- */
-
-int drm_gem_vram_simple_display_pipe_prepare_fb(
-	struct drm_simple_display_pipe *pipe,
-	struct drm_plane_state *new_state);
-
-void drm_gem_vram_simple_display_pipe_cleanup_fb(
-	struct drm_simple_display_pipe *pipe,
-	struct drm_plane_state *old_state);
-
 /**
  * define DRM_GEM_VRAM_DRIVER - default callback functions for
  * &struct drm_driver
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index fe8edb917360..a8d19b10f9b8 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -579,7 +579,7 @@ bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
 void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
-void drm_sched_start(struct drm_gpu_scheduler *sched);
+void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
 void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
 void drm_sched_increase_karma(struct drm_sched_job *bad);
 void drm_sched_reset_karma(struct drm_sched_job *bad);
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index e06bad467f55..e7ad819962e3 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -574,6 +574,12 @@ int dma_fence_get_status(struct dma_fence *fence);
 * rather than success. This must be set before signaling (so that the value
 * is visible before any waiters on the signal callback are woken). This
 * helper exists to help catching erroneous setting of #dma_fence.error.
+ *
+ * Examples of error codes which drivers should use:
+ *
+ * * %-ENODATA This operation produced no data, no other operation affected.
+ * * %-ECANCELED All operations from the same context have been canceled.
+ * * %-ETIME Operation caused a timeout and potentially device reset.
 */
 static inline void dma_fence_set_error(struct dma_fence *fence,
				       int error)
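The dma_fence error codes documented above pair with the new int errno argument to drm_sched_start(). A hedged sketch of a driver timeout handler using both; the callback body and placeholder comments are illustrative, only the dma_fence and scheduler calls come from these headers:

#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <drm/gpu_scheduler.h>

static enum drm_gpu_sched_stat
example_timedout_job(struct drm_sched_job *bad_job)
{
	struct drm_gpu_scheduler *sched = bad_job->sched;
	struct dma_fence *hw_fence = bad_job->s_fence->parent;

	drm_sched_stop(sched, bad_job);

	/* The stuck job forced a reset: mark its hardware fence before it
	 * gets signaled so the error bubbles up to userspace. */
	if (hw_fence)
		dma_fence_set_error(hw_fence, -ETIME);

	/* ... device reset would happen here ... */

	/* Restart the queue; the error passed here is applied to the
	 * pending fences as submissions are kicked off again. */
	drm_sched_start(sched, -ECANCELED);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}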
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 9c8119ed13a4..5a7a81e5f9bd 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -14,12 +14,17 @@
 
 enum host1x_class {
 	HOST1X_CLASS_HOST1X = 0x1,
+	HOST1X_CLASS_NVJPG1 = 0x7,
+	HOST1X_CLASS_NVENC = 0x21,
+	HOST1X_CLASS_NVENC1 = 0x22,
 	HOST1X_CLASS_GR2D = 0x51,
 	HOST1X_CLASS_GR2D_SB = 0x52,
 	HOST1X_CLASS_VIC = 0x5D,
 	HOST1X_CLASS_GR3D = 0x60,
+	HOST1X_CLASS_NVJPG = 0xC0,
 	HOST1X_CLASS_NVDEC = 0xF0,
 	HOST1X_CLASS_NVDEC1 = 0xF5,
+	HOST1X_CLASS_OFA = 0xF8,
 };
 
 struct host1x;
diff --git a/include/linux/host1x_context_bus.h b/include/linux/host1x_context_bus.h
index 72462737a6db..c928cb432680 100644
--- a/include/linux/host1x_context_bus.h
+++ b/include/linux/host1x_context_bus.h
@@ -9,7 +9,7 @@
 #include <linux/device.h>
 
 #ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS
-extern struct bus_type host1x_context_device_bus_type;
+extern const struct bus_type host1x_context_device_bus_type;
 #endif
 
 #endif
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index 9f231d40a146..568724be6628 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -40,6 +40,7 @@ extern "C" {
 #define DRM_IOCTL_PANFROST_PERFCNT_DUMP	DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_PERFCNT_DUMP, struct drm_panfrost_perfcnt_dump)
 
 #define PANFROST_JD_REQ_FS (1 << 0)
+#define PANFROST_JD_REQ_CYCLE_COUNT (1 << 1)
 /**
  * struct drm_panfrost_submit - ioctl argument for submitting commands to the 3D
  * engine.
@@ -172,6 +173,8 @@ enum drm_panfrost_param {
 	DRM_PANFROST_PARAM_NR_CORE_GROUPS,
 	DRM_PANFROST_PARAM_THREAD_TLS_ALLOC,
 	DRM_PANFROST_PARAM_AFBC_FEATURES,
+	DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP,
+	DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY,
 };
 
 struct drm_panfrost_get_param {
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index e23a7f9b0eac..1fd8473548ac 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -260,6 +260,9 @@ enum drm_panthor_dev_query_type {
 
 	/** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */
 	DRM_PANTHOR_DEV_QUERY_CSIF_INFO,
+
+	/** @DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO: Query timestamp information. */
+	DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO,
 };
 
 /**
@@ -378,6 +381,25 @@ struct drm_panthor_csif_info {
 };
 
 /**
+ * struct drm_panthor_timestamp_info - Timestamp information
+ *
+ * Structure grouping all queryable information relating to the GPU timestamp.
+ */
+struct drm_panthor_timestamp_info {
+	/**
+	 * @timestamp_frequency: The frequency of the timestamp timer or 0 if
+	 * unknown.
+	 */
+	__u64 timestamp_frequency;
+
+	/** @current_timestamp: The current timestamp. */
+	__u64 current_timestamp;
+
+	/** @timestamp_offset: The offset of the timestamp timer. */
+	__u64 timestamp_offset;
+};
+
+/**
 * struct drm_panthor_dev_query - Arguments passed to DRM_PANTHOR_IOCTL_DEV_QUERY
 */
 struct drm_panthor_dev_query {