-rw-r--r--  Documentation/devicetree/bindings/display/bridge/analogix,dp.yaml | 63
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/analogix_dp.txt | 51
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml | 16
-rw-r--r--  Documentation/devicetree/bindings/display/dsi-controller.yaml | 18
-rw-r--r--  Documentation/devicetree/bindings/display/exynos/exynos_dp.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/panel/samsung,ams495qa01.yaml | 57
-rw-r--r--  Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt | 98
-rw-r--r--  Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt | 94
-rw-r--r--  Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml | 103
-rw-r--r--  Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml | 166
-rw-r--r--  Documentation/devicetree/bindings/display/rockchip/rockchip,lvds.yaml | 170
-rw-r--r--  Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt | 92
-rw-r--r--  Documentation/devicetree/bindings/display/simple-framebuffer.yaml | 9
-rw-r--r--  Documentation/devicetree/bindings/soc/rockchip/grf.yaml | 10
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  drivers/accel/ivpu/ivpu_pm.c | 10
-rw-r--r--  drivers/dma-buf/dma-buf.c | 2
-rw-r--r--  drivers/gpu/drm/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/Makefile | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | 324
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_dp.c | 10
-rw-r--r--  drivers/gpu/drm/ast/ast_dp501.c | 40
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 2
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 84
-rw-r--r--  drivers/gpu/drm/ast/ast_i2c.c | 8
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 24
-rw-r--r--  drivers/gpu/drm/ast/ast_mm.c | 4
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 104
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c | 94
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/bridge/panel.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/tc358762.c | 1
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 20
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 28
-rw-r--r--  drivers/gpu/drm/drm_displayid.c | 62
-rw-r--r--  drivers/gpu/drm/drm_dumb_buffers.c | 5
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 65
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 25
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 65
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c | 11
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 3
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 3
-rw-r--r--  drivers/gpu/drm/drm_of.c | 51
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c | 5
-rw-r--r--  drivers/gpu/drm/drm_suballoc.c | 457
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c | 7
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.c | 4
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.h | 3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 22
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwmon.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_led.h | 2
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 11
-rw-r--r--  drivers/gpu/drm/panel/Makefile | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c | 209
-rw-r--r--  drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c | 522
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c | 11
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 11
-rw-r--r--  drivers/gpu/drm/radeon/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ib.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 25
-rw-r--r--  drivers/gpu/drm/radeon/radeon_sa.c | 316
-rw-r--r--  drivers/gpu/drm/radeon/radeon_semaphore.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 7
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 16
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 7
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 80
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_rgb.c | 19
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_rgb.h | 6
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 29
-rw-r--r--  drivers/gpu/drm/tests/drm_format_helper_test.c | 10
-rw-r--r--  drivers/gpu/drm/tidss/tidss_dispc.c | 12
-rw-r--r--  drivers/gpu/drm/tidss/tidss_dispc.h | 8
-rw-r--r--  drivers/gpu/drm/tidss/tidss_plane.c | 20
-rw-r--r--  drivers/gpu/drm/tiny/simpledrm.c | 31
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 30
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 19
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c | 1
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c | 26
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c | 78
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 46
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/vc4/vc4_validate.c | 4
-rw-r--r--  drivers/gpu/drm/vgem/vgem_fence.c | 1
-rw-r--r--  drivers/gpu/drm/virtio/Kconfig | 11
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c | 6
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c | 39
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 407
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.h | 203
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c | 14
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 53
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 36
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 65
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 26
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 245
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 105
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 91
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 232
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 43
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 57
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 45
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 22
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 68
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 246
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 53
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 67
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_so.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 323
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c | 20
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 107
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 134
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_va.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 150
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.h | 10
-rw-r--r--  drivers/ps3/ps3av.c | 9
-rw-r--r--  drivers/video/Kconfig | 3
-rw-r--r--  drivers/video/Makefile | 1
-rw-r--r--  drivers/video/cmdline.c | 133
-rw-r--r--  drivers/video/fbdev/Kconfig | 5
-rw-r--r--  drivers/video/fbdev/core/Makefile | 3
-rw-r--r--  drivers/video/fbdev/core/fb_cmdline.c | 94
-rw-r--r--  drivers/video/fbdev/core/modedb.c | 8
-rw-r--r--  include/drm/drm_atomic_helper.h | 26
-rw-r--r--  include/drm/drm_displayid.h | 12
-rw-r--r--  include/drm/drm_drv.h | 19
-rw-r--r--  include/drm/drm_edid.h | 12
-rw-r--r--  include/drm/drm_gem.h | 12
-rw-r--r--  include/drm/drm_gem_shmem_helper.h | 30
-rw-r--r--  include/drm/drm_modeset_helper_vtables.h | 29
-rw-r--r--  include/drm/drm_of.h | 12
-rw-r--r--  include/drm/drm_suballoc.h | 108
-rw-r--r--  include/drm/gpu_scheduler.h | 6
-rw-r--r--  include/drm/ttm/ttm_device.h | 2
-rw-r--r--  include/linux/fb.h | 1
-rw-r--r--  include/uapi/drm/drm.h | 57
-rw-r--r--  include/video/cmdline.h | 20
155 files changed, 4430 insertions, 3332 deletions
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,dp.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,dp.yaml
new file mode 100644
index 000000000000..c9b06885cc63
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/analogix,dp.yaml
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/analogix,dp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analogix Display Port bridge
+
+maintainers:
+ - Rob Herring <robh@kernel.org>
+
+properties:
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks: true
+
+ clock-names: true
+
+ phys: true
+
+ phy-names:
+ const: dp
+
+ force-hpd:
+ description:
+ Indicate that the driver should force hotplug detection (hpd) when
+ hpd detection fails; used for some eDP screens that do not have an hpd signal.
+
+ hpd-gpios:
+ description:
+ Hotplug detect GPIO.
+ Indicates which GPIO should be used for hotplug detection.
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Input node to receive pixel data.
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Port node with one endpoint connected to a dp-connector node.
+
+ required:
+ - port@0
+ - port@1
+
+required:
+ - reg
+ - interrupts
+ - clock-names
+ - clocks
+ - ports
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt b/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
deleted file mode 100644
index 027d76c27a41..000000000000
--- a/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-Analogix Display Port bridge bindings
-
-Required properties for dp-controller:
- -compatible:
- platform specific such as:
- * "samsung,exynos5-dp"
- * "rockchip,rk3288-dp"
- * "rockchip,rk3399-edp"
- -reg:
- physical base address of the controller and length
- of memory mapped region.
- -interrupts:
- interrupt combiner values.
- -clocks:
- from common clock binding: handle to dp clock.
- -clock-names:
- from common clock binding: Shall be "dp".
- -phys:
- from general PHY binding: the phandle for the PHY device.
- -phy-names:
- from general PHY binding: Should be "dp".
-
-Optional properties for dp-controller:
- -force-hpd:
- Indicate driver need force hpd when hpd detect failed, this
- is used for some eDP screen which don't have hpd signal.
- -hpd-gpios:
- Hotplug detect GPIO.
- Indicates which GPIO should be used for hotplug detection
- -port@[X]: SoC specific port nodes with endpoint definitions as defined
- in Documentation/devicetree/bindings/media/video-interfaces.txt,
- please refer to the SoC specific binding document:
- * Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
- * Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt
-
-[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
--------------------------------------------------------------------------------
-
-Example:
-
- dp-controller {
- compatible = "samsung,exynos5-dp";
- reg = <0x145b0000 0x10000>;
- interrupts = <10 3>;
- interrupt-parent = <&combiner>;
- clocks = <&clock 342>;
- clock-names = "dp";
-
- phys = <&dp_phy>;
- phy-names = "dp";
- };
diff --git a/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml
index 11fd68a70dca..0b51c64f141a 100644
--- a/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/snps,dw-mipi-dsi.yaml
@@ -26,19 +26,9 @@ properties:
reg:
maxItems: 1
- clocks:
- items:
- - description: Module clock
- - description: DSI bus clock for either AHB and APB
- - description: Pixel clock for the DPI/RGB input
- minItems: 2
-
- clock-names:
- items:
- - const: ref
- - const: pclk
- - const: px_clk
- minItems: 2
+ clocks: true
+
+ clock-names: true
resets:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/dsi-controller.yaml b/Documentation/devicetree/bindings/display/dsi-controller.yaml
index ca21671f6bdd..67ce10307ee0 100644
--- a/Documentation/devicetree/bindings/display/dsi-controller.yaml
+++ b/Documentation/devicetree/bindings/display/dsi-controller.yaml
@@ -30,6 +30,15 @@ properties:
$nodename:
pattern: "^dsi(@.*)?$"
+ clock-master:
+ type: boolean
+ description:
+ Should be enabled if the host is being used in conjunction with
+ another DSI host to drive the same peripheral. Hardware supporting
+ such a configuration generally requires the data on both the busses
+ to be driven by the same clock. Only the DSI host instance
+ controlling this clock should contain this property.
+
"#address-cells":
const: 1
@@ -52,15 +61,6 @@ patternProperties:
case the reg property can take multiple entries, one for each virtual
channel that the peripheral responds to.
- clock-master:
- type: boolean
- description:
- Should be enabled if the host is being used in conjunction with
- another DSI host to drive the same peripheral. Hardware supporting
- such a configuration generally requires the data on both the busses
- to be driven by the same clock. Only the DSI host instance
- controlling this clock should contain this property.
-
enforce-video-mode:
type: boolean
description:
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
index 9b6cba3f82af..3a401590320f 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
@@ -50,7 +50,7 @@ Optional properties for dp-controller:
Documentation/devicetree/bindings/display/panel/display-timing.txt
For the below properties, please refer to Analogix DP binding document:
- * Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
+ * Documentation/devicetree/bindings/display/bridge/analogix,dp.yaml
-phys (required)
-phy-names (required)
-hpd-gpios (optional)
diff --git a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
index c06902e4fe70..41eb7fbf7715 100644
--- a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
+++ b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
@@ -17,6 +17,8 @@ properties:
items:
- enum:
- chongzhou,cz101b4001
+ - radxa,display-10hd-ad001
+ - radxa,display-8hd-ad002
- const: jadard,jd9365da-h3
reg: true
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ams495qa01.yaml b/Documentation/devicetree/bindings/display/panel/samsung,ams495qa01.yaml
new file mode 100644
index 000000000000..58fa073ce258
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/samsung,ams495qa01.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/samsung,ams495qa01.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung AMS495QA01 panel with Magnachip D53E6EA8966 controller
+
+maintainers:
+ - Chris Morgan <macromorgan@hotmail.com>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ const: samsung,ams495qa01
+
+ reg: true
+ reset-gpios:
+ description: reset gpio, must be GPIO_ACTIVE_LOW
+ elvdd-supply:
+ description: regulator that supplies voltage to the panel display
+ enable-gpios: true
+ port: true
+ vdd-supply:
+ description: regulator that supplies voltage to panel logic
+
+required:
+ - compatible
+ - reg
+ - reset-gpios
+ - vdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "samsung,ams495qa01";
+ reg = <0>;
+ reset-gpios = <&gpio4 0 GPIO_ACTIVE_LOW>;
+ vdd-supply = <&vcc_3v3>;
+
+ port {
+ mipi_in_panel: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt
deleted file mode 100644
index 43561584c13a..000000000000
--- a/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt
+++ /dev/null
@@ -1,98 +0,0 @@
-Rockchip RK3288 specific extensions to the Analogix Display Port
-================================
-
-Required properties:
-- compatible: "rockchip,rk3288-dp",
- "rockchip,rk3399-edp";
-
-- reg: physical base address of the controller and length
-
-- clocks: from common clock binding: handle to dp clock.
- of memory mapped region.
-
-- clock-names: from common clock binding:
- Required elements: "dp" "pclk"
-
-- resets: Must contain an entry for each entry in reset-names.
- See ../reset/reset.txt for details.
-
-- pinctrl-names: Names corresponding to the chip hotplug pinctrl states.
-- pinctrl-0: pin-control mode. should be <&edp_hpd>
-
-- reset-names: Must include the name "dp"
-
-- rockchip,grf: this soc should set GRF regs, so need get grf here.
-
-- ports: there are 2 port nodes with endpoint definitions as defined in
- Documentation/devicetree/bindings/media/video-interfaces.txt.
- Port 0: contained 2 endpoints, connecting to the output of vop.
- Port 1: contained 1 endpoint, connecting to the input of panel.
-
-Optional property for different chips:
-- clocks: from common clock binding: handle to grf_vio clock.
-
-- clock-names: from common clock binding:
- Required elements: "grf"
-
-For the below properties, please refer to Analogix DP binding document:
- * Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
-- phys (required)
-- phy-names (required)
-- hpd-gpios (optional)
-- force-hpd (optional)
--------------------------------------------------------------------------------
-
-Example:
- dp-controller: dp@ff970000 {
- compatible = "rockchip,rk3288-dp";
- reg = <0xff970000 0x4000>;
- interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru SCLK_EDP>, <&cru PCLK_EDP_CTRL>;
- clock-names = "dp", "pclk";
- phys = <&dp_phy>;
- phy-names = "dp";
-
- rockchip,grf = <&grf>;
- resets = <&cru 111>;
- reset-names = "dp";
-
- pinctrl-names = "default";
- pinctrl-0 = <&edp_hpd>;
-
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
- edp_in: port@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
- edp_in_vopb: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&vopb_out_edp>;
- };
- edp_in_vopl: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&vopl_out_edp>;
- };
- };
-
- edp_out: port@1 {
- reg = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
- edp_out_panel: endpoint {
- reg = <0>;
- remote-endpoint = <&panel_in_edp>
- };
- };
- };
- };
-
- pinctrl {
- edp {
- edp_hpd: edp-hpd {
- rockchip,pins = <7 11 RK_FUNC_2 &pcfg_pull_none>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt
deleted file mode 100644
index 9a223df8530c..000000000000
--- a/Documentation/devicetree/bindings/display/rockchip/dw_mipi_dsi_rockchip.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-Rockchip specific extensions to the Synopsys Designware MIPI DSI
-================================
-
-Required properties:
-- #address-cells: Should be <1>.
-- #size-cells: Should be <0>.
-- compatible: one of
- "rockchip,px30-mipi-dsi", "snps,dw-mipi-dsi"
- "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi"
- "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi"
- "rockchip,rk3568-mipi-dsi", "snps,dw-mipi-dsi"
-- reg: Represent the physical address range of the controller.
-- interrupts: Represent the controller's interrupt to the CPU(s).
-- clocks, clock-names: Phandles to the controller's pll reference
- clock(ref) when using an internal dphy and APB clock(pclk).
- For RK3399, a phy config clock (phy_cfg) and a grf clock(grf)
- are required. As described in [1].
-- rockchip,grf: this soc should set GRF regs to mux vopl/vopb.
-- ports: contain a port node with endpoint definitions as defined in [2].
- For vopb,set the reg = <0> and set the reg = <1> for vopl.
-- video port 0 for the VOP input, the remote endpoint maybe vopb or vopl
-- video port 1 for either a panel or subsequent encoder
-
-Optional properties:
-- phys: from general PHY binding: the phandle for the PHY device.
-- phy-names: Should be "dphy" if phys references an external phy.
-- #phy-cells: Defined when used as ISP phy, should be 0.
-- power-domains: a phandle to mipi dsi power domain node.
-- resets: list of phandle + reset specifier pairs, as described in [3].
-- reset-names: string reset name, must be "apb".
-
-[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/media/video-interfaces.txt
-[3] Documentation/devicetree/bindings/reset/reset.txt
-
-Example:
- mipi_dsi: mipi@ff960000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi";
- reg = <0xff960000 0x4000>;
- interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&cru SCLK_MIPI_24M>, <&cru PCLK_MIPI_DSI0>;
- clock-names = "ref", "pclk";
- resets = <&cru SRST_MIPIDSI0>;
- reset-names = "apb";
- rockchip,grf = <&grf>;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- mipi_in: port@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- mipi_in_vopb: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&vopb_out_mipi>;
- };
- mipi_in_vopl: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&vopl_out_mipi>;
- };
- };
-
- mipi_out: port@1 {
- reg = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- mipi_out_panel: endpoint {
- remote-endpoint = <&panel_in_mipi>;
- };
- };
- };
-
- panel {
- compatible ="boe,tv080wum-nl0";
- reg = <0>;
-
- enable-gpios = <&gpio7 3 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&lcd_en>;
- backlight = <&backlight>;
-
- port {
- panel_in_mipi: endpoint {
- remote-endpoint = <&mipi_out_panel>;
- };
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml
new file mode 100644
index 000000000000..60dedf9b2be7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,analogix-dp.yaml
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/rockchip/rockchip,analogix-dp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip specific extensions to the Analogix Display Port
+
+maintainers:
+ - Sandy Huang <hjc@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ compatible:
+ enum:
+ - rockchip,rk3288-dp
+ - rockchip,rk3399-edp
+
+ clocks:
+ minItems: 2
+ maxItems: 3
+
+ clock-names:
+ minItems: 2
+ items:
+ - const: dp
+ - const: pclk
+ - const: grf
+
+ power-domains:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: dp
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ This SoC makes use of GRF regs.
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+ - rockchip,grf
+
+allOf:
+ - $ref: /schemas/display/bridge/analogix,dp.yaml#
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3288-cru.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ dp@ff970000 {
+ compatible = "rockchip,rk3288-dp";
+ reg = <0xff970000 0x4000>;
+ interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_EDP>, <&cru PCLK_EDP_CTRL>;
+ clock-names = "dp", "pclk";
+ phys = <&dp_phy>;
+ phy-names = "dp";
+ resets = <&cru 111>;
+ reset-names = "dp";
+ rockchip,grf = <&grf>;
+ pinctrl-0 = <&edp_hpd>;
+ pinctrl-names = "default";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ edp_in: port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ edp_in_vopb: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vopb_out_edp>;
+ };
+ edp_in_vopl: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vopl_out_edp>;
+ };
+ };
+
+ edp_out: port@1 {
+ reg = <1>;
+
+ edp_out_panel: endpoint {
+ remote-endpoint = <&panel_in_edp>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml
new file mode 100644
index 000000000000..8e8a40879140
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,dw-mipi-dsi.yaml
@@ -0,0 +1,166 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/rockchip/rockchip,dw-mipi-dsi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip specific extensions to the Synopsys Designware MIPI DSI
+
+maintainers:
+ - Sandy Huang <hjc@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - rockchip,px30-mipi-dsi
+ - rockchip,rk3288-mipi-dsi
+ - rockchip,rk3399-mipi-dsi
+ - rockchip,rk3568-mipi-dsi
+ - const: snps,dw-mipi-dsi
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 4
+
+ clock-names:
+ oneOf:
+ - minItems: 2
+ items:
+ - const: ref
+ - const: pclk
+ - const: phy_cfg
+ - const: grf
+ - const: pclk
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ This SoC uses GRF regs to switch between vopl/vopb.
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ const: dphy
+
+ "#phy-cells":
+ const: 0
+ description:
+ Defined when in use as ISP phy.
+
+ power-domains:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - rockchip,grf
+
+allOf:
+ - $ref: /schemas/display/bridge/snps,dw-mipi-dsi.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - rockchip,px30-mipi-dsi
+ - rockchip,rk3568-mipi-dsi
+
+ then:
+ properties:
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ maxItems: 1
+
+ required:
+ - phys
+ - phy-names
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3288-mipi-dsi
+
+ then:
+ properties:
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ maxItems: 2
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3399-mipi-dsi
+
+ then:
+ properties:
+ clocks:
+ minItems: 4
+
+ clock-names:
+ minItems: 4
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3288-cru.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ mipi_dsi: dsi@ff960000 {
+ compatible = "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi";
+ reg = <0xff960000 0x4000>;
+ interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru SCLK_MIPIDSI_24M>, <&cru PCLK_MIPI_DSI0>;
+ clock-names = "ref", "pclk";
+ resets = <&cru SRST_MIPIDSI0>;
+ reset-names = "apb";
+ rockchip,grf = <&grf>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mipi_in: port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mipi_in_vopb: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vopb_out_mipi>;
+ };
+ mipi_in_vopl: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vopl_out_mipi>;
+ };
+ };
+
+ mipi_out: port@1 {
+ reg = <1>;
+
+ mipi_out_panel: endpoint {
+ remote-endpoint = <&panel_in_mipi>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,lvds.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,lvds.yaml
new file mode 100644
index 000000000000..03b002a05c47
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,lvds.yaml
@@ -0,0 +1,170 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/rockchip/rockchip,lvds.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip low-voltage differential signal (LVDS) transmitter
+
+maintainers:
+ - Sandy Huang <hjc@rock-chips.com>
+ - Heiko Stuebner <heiko@sntech.de>
+
+properties:
+ compatible:
+ enum:
+ - rockchip,px30-lvds
+ - rockchip,rk3288-lvds
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: pclk_lvds
+
+ avdd1v0-supply:
+ description: 1.0V analog power.
+
+ avdd1v8-supply:
+ description: 1.8V analog power.
+
+ avdd3v3-supply:
+ description: 3.3V analog power.
+
+ rockchip,grf:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Phandle to the general register files syscon.
+
+ rockchip,output:
+ $ref: /schemas/types.yaml#/definitions/string
+ enum: [rgb, lvds, duallvds]
+ description: This describes the output interface.
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ const: dphy
+
+ pinctrl-names:
+ const: lcdc
+
+ pinctrl-0: true
+
+ power-domains:
+ maxItems: 1
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Video port 0 for the VOP input.
+ The remote endpoint may be vopb or vopl.
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description:
+ Video port 1 for either a panel or subsequent encoder.
+
+ required:
+ - port@0
+ - port@1
+
+required:
+ - compatible
+ - rockchip,grf
+ - rockchip,output
+ - ports
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,px30-lvds
+
+ then:
+ properties:
+ reg: false
+ clocks: false
+ clock-names: false
+ avdd1v0-supply: false
+ avdd1v8-supply: false
+ avdd3v3-supply: false
+
+ required:
+ - phys
+ - phy-names
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3288-lvds
+
+ then:
+ properties:
+ phys: false
+ phy-names: false
+
+ required:
+ - reg
+ - clocks
+ - clock-names
+ - avdd1v0-supply
+ - avdd1v8-supply
+ - avdd3v3-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/rk3288-cru.h>
+
+ lvds: lvds@ff96c000 {
+ compatible = "rockchip,rk3288-lvds";
+ reg = <0xff96c000 0x4000>;
+ clocks = <&cru PCLK_LVDS_PHY>;
+ clock-names = "pclk_lvds";
+ avdd1v0-supply = <&vdd10_lcd>;
+ avdd1v8-supply = <&vcc18_lcd>;
+ avdd3v3-supply = <&vcca_33>;
+ pinctrl-names = "lcdc";
+ pinctrl-0 = <&lcdc_ctl>;
+ rockchip,grf = <&grf>;
+ rockchip,output = "rgb";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ lvds_in: port@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ lvds_in_vopb: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&vopb_out_lvds>;
+ };
+ lvds_in_vopl: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&vopl_out_lvds>;
+ };
+ };
+
+ lvds_out: port@1 {
+ reg = <1>;
+
+ lvds_out_panel: endpoint {
+ remote-endpoint = <&panel_in_lvds>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt
deleted file mode 100644
index aaf8c44cf90f..000000000000
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt
+++ /dev/null
@@ -1,92 +0,0 @@
-Rockchip RK3288 LVDS interface
-================================
-
-Required properties:
-- compatible: matching the soc type, one of
- - "rockchip,rk3288-lvds";
- - "rockchip,px30-lvds";
-
-- reg: physical base address of the controller and length
- of memory mapped region.
-- clocks: must include clock specifiers corresponding to entries in the
- clock-names property.
-- clock-names: must contain "pclk_lvds"
-
-- avdd1v0-supply: regulator phandle for 1.0V analog power
-- avdd1v8-supply: regulator phandle for 1.8V analog power
-- avdd3v3-supply: regulator phandle for 3.3V analog power
-
-- rockchip,grf: phandle to the general register files syscon
-- rockchip,output: "rgb", "lvds" or "duallvds", This describes the output interface
-
-- phys: LVDS/DSI DPHY (px30 only)
-- phy-names: name of the PHY, must be "dphy" (px30 only)
-
-Optional properties:
-- pinctrl-names: must contain a "lcdc" entry.
-- pinctrl-0: pin control group to be used for this controller.
-
-Required nodes:
-
-The lvds has two video ports as described by
- Documentation/devicetree/bindings/media/video-interfaces.txt
-Their connections are modeled using the OF graph bindings specified in
- Documentation/devicetree/bindings/graph.txt.
-
-- video port 0 for the VOP input, the remote endpoint maybe vopb or vopl
-- video port 1 for either a panel or subsequent encoder
-
-Example:
-
-lvds_panel: lvds-panel {
- compatible = "auo,b101ean01";
- enable-gpios = <&gpio7 21 GPIO_ACTIVE_HIGH>;
- data-mapping = "jeida-24";
-
- ports {
- panel_in_lvds: endpoint {
- remote-endpoint = <&lvds_out_panel>;
- };
- };
-};
-
-For Rockchip RK3288:
-
- lvds: lvds@ff96c000 {
- compatible = "rockchip,rk3288-lvds";
- rockchip,grf = <&grf>;
- reg = <0xff96c000 0x4000>;
- clocks = <&cru PCLK_LVDS_PHY>;
- clock-names = "pclk_lvds";
- pinctrl-names = "lcdc";
- pinctrl-0 = <&lcdc_ctl>;
- avdd1v0-supply = <&vdd10_lcd>;
- avdd1v8-supply = <&vcc18_lcd>;
- avdd3v3-supply = <&vcca_33>;
- rockchip,output = "rgb";
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- lvds_in: port@0 {
- reg = <0>;
-
- lvds_in_vopb: endpoint@0 {
- reg = <0>;
- remote-endpoint = <&vopb_out_lvds>;
- };
- lvds_in_vopl: endpoint@1 {
- reg = <1>;
- remote-endpoint = <&vopl_out_lvds>;
- };
- };
-
- lvds_out: port@1 {
- reg = <1>;
-
- lvds_out_panel: endpoint {
- remote-endpoint = <&panel_in_lvds>;
- };
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
index 3c9f29e428a4..296500f9da05 100644
--- a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
+++ b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
@@ -26,6 +26,11 @@ description: |+
over control to a driver for the real hardware. The bindings for the
hw nodes must specify which node is considered the primary node.
+ If a panel node is given, then the driver uses this to configure the
+ physical width and height of the display. If no panel node is given,
+ then the driver uses the width and height properties of the simplefb
+ node to estimate it.
+
It is advised to add display# aliases to help the OS determine how
to number things. If display# aliases are used, then if the simplefb
node contains a display property then the /aliases/display# path
@@ -117,6 +122,10 @@ properties:
$ref: /schemas/types.yaml#/definitions/phandle
description: Primary display hardware node
+ panel:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: Display panel node
+
allwinner,pipeline:
description: Pipeline used by the framebuffer on Allwinner SoCs
enum:
diff --git a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
index e697c928900d..65a2d5a4f28d 100644
--- a/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
+++ b/Documentation/devicetree/bindings/soc/rockchip/grf.yaml
@@ -80,13 +80,17 @@ allOf:
properties:
compatible:
contains:
- const: rockchip,px30-grf
+ enum:
+ - rockchip,px30-grf
then:
properties:
lvds:
- description:
- Documentation/devicetree/bindings/display/rockchip/rockchip-lvds.txt
+ type: object
+
+ $ref: /schemas/display/rockchip/rockchip,lvds.yaml#
+
+ unevaluatedProperties: false
- if:
properties:
diff --git a/MAINTAINERS b/MAINTAINERS
index ec57c42ed544..88e138cd1091 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7044,7 +7044,7 @@ F: Documentation/devicetree/bindings/display/xlnx/
F: drivers/gpu/drm/xlnx/
DRM PANEL DRIVERS
-M: Thierry Reding <thierry.reding@gmail.com>
+M: Neil Armstrong <neil.armstrong@linaro.org>
R: Sam Ravnborg <sam@ravnborg.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 553bcbd787b3..a880f1dd857e 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -237,8 +237,6 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
{
int ret;
- ivpu_dbg(vdev, RPM, "rpm_get count %d\n", atomic_read(&vdev->drm.dev->power.usage_count));
-
ret = pm_runtime_resume_and_get(vdev->drm.dev);
if (!drm_WARN_ON(&vdev->drm, ret < 0))
vdev->pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
@@ -248,8 +246,6 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
void ivpu_rpm_put(struct ivpu_device *vdev)
{
- ivpu_dbg(vdev, RPM, "rpm_put count %d\n", atomic_read(&vdev->drm.dev->power.usage_count));
-
pm_runtime_mark_last_busy(vdev->drm.dev);
pm_runtime_put_autosuspend(vdev->drm.dev);
}
@@ -314,16 +310,10 @@ void ivpu_pm_enable(struct ivpu_device *vdev)
pm_runtime_allow(dev);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
-
- ivpu_dbg(vdev, RPM, "Enable RPM count %d\n", atomic_read(&dev->power.usage_count));
}
void ivpu_pm_disable(struct ivpu_device *vdev)
{
- struct device *dev = vdev->drm.dev;
-
- ivpu_dbg(vdev, RPM, "Disable RPM count %d\n", atomic_read(&dev->power.usage_count));
-
pm_runtime_get_noresume(vdev->drm.dev);
pm_runtime_forbid(vdev->drm.dev);
}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 757c0fb77a6c..aa4ea8530cb3 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -828,7 +828,7 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
* - dma_buf_attach()
* - dma_buf_dynamic_attach()
* - dma_buf_detach()
- * - dma_buf_export(
+ * - dma_buf_export()
* - dma_buf_fd()
* - dma_buf_get()
* - dma_buf_put()
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index dc0f94f02a82..ba3fb04bb691 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -10,13 +10,13 @@ menuconfig DRM
depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
select DRM_PANEL_ORIENTATION_QUIRKS
select HDMI
- select FB_CMDLINE
select I2C
select DMA_SHARED_BUFFER
select SYNC_FILE
# gallium uses SYS_kcmp for os_same_file_description() to de-duplicate
# device and dmabuf fd. Let's make sure that is available for our userspace.
select KCMP
+ select VIDEO_CMDLINE
select VIDEO_NOMODESET
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
@@ -232,6 +232,10 @@ config DRM_GEM_SHMEM_HELPER
help
Choose this if you need the GEM shmem helper functions
+config DRM_SUBALLOC_HELPER
+ tristate
+ depends on DRM
+
config DRM_SCHED
tristate
depends on DRM
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ab4460fcd63f..1e04d135e866 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -88,6 +88,9 @@ obj-$(CONFIG_DRM_GEM_DMA_HELPER) += drm_dma_helper.o
drm_shmem_helper-y := drm_gem_shmem_helper.o
obj-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_shmem_helper.o
+drm_suballoc_helper-y := drm_suballoc.o
+obj-$(CONFIG_DRM_SUBALLOC_HELPER) += drm_suballoc_helper.o
+
drm_vram_helper-y := drm_gem_vram_helper.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgpu/Kconfig
index a82d36ea88e2..5d1e28218020 100644
--- a/drivers/gpu/drm/amd/amdgpu/Kconfig
+++ b/drivers/gpu/drm/amd/amdgpu/Kconfig
@@ -19,6 +19,7 @@ config DRM_AMDGPU
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
select DRM_BUDDY
+ select DRM_SUBALLOC_HELPER
# amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work
# ACPI_VIDEO's dependencies must also be selected.
select INPUT if ACPI
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 164141bc8b4a..dda88090f044 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -424,29 +424,11 @@ struct amdgpu_clock {
* alignment).
*/
-#define AMDGPU_SA_NUM_FENCE_LISTS 32
-
struct amdgpu_sa_manager {
- wait_queue_head_t wq;
- struct amdgpu_bo *bo;
- struct list_head *hole;
- struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
- struct list_head olist;
- unsigned size;
- uint64_t gpu_addr;
- void *cpu_ptr;
- uint32_t domain;
- uint32_t align;
-};
-
-/* sub-allocation buffer */
-struct amdgpu_sa_bo {
- struct list_head olist;
- struct list_head flist;
- struct amdgpu_sa_manager *manager;
- unsigned soffset;
- unsigned eoffset;
- struct dma_fence *fence;
+ struct drm_suballoc_manager base;
+ struct amdgpu_bo *bo;
+ uint64_t gpu_addr;
+ void *cpu_ptr;
};
int amdgpu_fence_slab_init(void);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index bcccc348dbe2..df7eb0b7c4b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -69,7 +69,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (size) {
r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
- &ib->sa_bo, size, 256);
+ &ib->sa_bo, size);
if (r) {
dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
return r;
@@ -309,8 +309,7 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
- AMDGPU_IB_POOL_SIZE,
- AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_IB_POOL_SIZE, 256,
AMDGPU_GEM_DOMAIN_GTT);
if (r)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 6c7d672412b2..c842ce635a88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -600,7 +600,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
bo->tbo.resource->mem_type == TTM_PL_VRAM &&
- bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ amdgpu_bo_in_cpu_visible_vram(bo))
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
@@ -1346,7 +1346,6 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
- unsigned long offset;
int r;
/* Remember that this BO was accessed by the CPU */
@@ -1355,8 +1354,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
- offset = bo->resource->start << PAGE_SHIFT;
- if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
+ if (amdgpu_bo_in_cpu_visible_vram(abo))
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -1378,10 +1376,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
else if (unlikely(r))
return VM_FAULT_SIGBUS;
- offset = bo->resource->start << PAGE_SHIFT;
/* this should never happen */
if (bo->resource->mem_type == TTM_PL_VRAM &&
- (offset + bo->base.size) > adev->gmc.visible_vram_size)
+ !amdgpu_bo_in_cpu_visible_vram(abo))
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
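
The two open-coded aperture checks removed above are folded into a single
amdgpu_bo_in_cpu_visible_vram() helper, added to amdgpu_object.h (the 23-line
change in the diffstat; its body is not part of this hunk). A minimal sketch of
what such a helper could look like, mirroring the removed checks -- illustrative
only, not the patch's actual implementation:

	/* Hypothetical sketch only: the real helper lives in amdgpu_object.h.
	 * The removed call sites used slightly different open-coded tests
	 * (start-in-aperture vs. fully-within-aperture); this sketch settles
	 * on the latter. */
	static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
	{
		struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
		u64 offset = (u64)bo->tbo.resource->start << PAGE_SHIFT;

		return bo->tbo.resource->mem_type == TTM_PL_VRAM &&
		       offset + bo->tbo.base.size <= adev->gmc.visible_vram_size;
	}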
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 93207badf83f..5a85726ce853 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -336,15 +336,22 @@ uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
/*
* sub allocation
*/
+static inline struct amdgpu_sa_manager *
+to_amdgpu_sa_manager(struct drm_suballoc_manager *manager)
+{
+ return container_of(manager, struct amdgpu_sa_manager, base);
+}
-static inline uint64_t amdgpu_sa_bo_gpu_addr(struct amdgpu_sa_bo *sa_bo)
+static inline uint64_t amdgpu_sa_bo_gpu_addr(struct drm_suballoc *sa_bo)
{
- return sa_bo->manager->gpu_addr + sa_bo->soffset;
+ return to_amdgpu_sa_manager(sa_bo->manager)->gpu_addr +
+ drm_suballoc_soffset(sa_bo);
}
-static inline void * amdgpu_sa_bo_cpu_addr(struct amdgpu_sa_bo *sa_bo)
+static inline void *amdgpu_sa_bo_cpu_addr(struct drm_suballoc *sa_bo)
{
- return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+ return to_amdgpu_sa_manager(sa_bo->manager)->cpu_ptr +
+ drm_suballoc_soffset(sa_bo);
}
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
@@ -355,11 +362,11 @@ void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_sa_bo **sa_bo,
- unsigned size, unsigned align);
+ struct drm_suballoc **sa_bo,
+ unsigned int size);
void amdgpu_sa_bo_free(struct amdgpu_device *adev,
- struct amdgpu_sa_bo **sa_bo,
- struct dma_fence *fence);
+ struct drm_suballoc **sa_bo,
+ struct dma_fence *fence);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 3989e755a5b4..018f36b10de8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -27,6 +27,7 @@
#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_print.h>
+#include <drm/drm_suballoc.h>
struct amdgpu_device;
struct amdgpu_ring;
@@ -92,7 +93,7 @@ enum amdgpu_ib_pool_type {
};
struct amdgpu_ib {
- struct amdgpu_sa_bo *sa_bo;
+ struct drm_suballoc *sa_bo;
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 524d10b21041..c6b4337eb20c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -44,327 +44,63 @@
#include "amdgpu.h"
-static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
-static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);
-
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain)
+ unsigned int size, u32 suballoc_align, u32 domain)
{
- int i, r;
-
- init_waitqueue_head(&sa_manager->wq);
- sa_manager->bo = NULL;
- sa_manager->size = size;
- sa_manager->domain = domain;
- sa_manager->align = align;
- sa_manager->hole = &sa_manager->olist;
- INIT_LIST_HEAD(&sa_manager->olist);
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
- INIT_LIST_HEAD(&sa_manager->flist[i]);
+ int r;
- r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo,
- &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
+ r = amdgpu_bo_create_kernel(adev, size, AMDGPU_GPU_PAGE_SIZE, domain,
+ &sa_manager->bo, &sa_manager->gpu_addr,
+ &sa_manager->cpu_ptr);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
}
- memset(sa_manager->cpu_ptr, 0, sa_manager->size);
+ memset(sa_manager->cpu_ptr, 0, size);
+ drm_suballoc_manager_init(&sa_manager->base, size, suballoc_align);
return r;
}
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
struct amdgpu_sa_manager *sa_manager)
{
- struct amdgpu_sa_bo *sa_bo, *tmp;
-
if (sa_manager->bo == NULL) {
dev_err(adev->dev, "no bo for sa manager\n");
return;
}
- if (!list_empty(&sa_manager->olist)) {
- sa_manager->hole = &sa_manager->olist,
- amdgpu_sa_bo_try_free(sa_manager);
- if (!list_empty(&sa_manager->olist)) {
- dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
- }
- }
- list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
- amdgpu_sa_bo_remove_locked(sa_bo);
- }
+ drm_suballoc_manager_fini(&sa_manager->base);
amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
- sa_manager->size = 0;
}
-static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
-{
- struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
- if (sa_manager->hole == &sa_bo->olist) {
- sa_manager->hole = sa_bo->olist.prev;
- }
- list_del_init(&sa_bo->olist);
- list_del_init(&sa_bo->flist);
- dma_fence_put(sa_bo->fence);
- kfree(sa_bo);
-}
-
-static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
+int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
+ struct drm_suballoc **sa_bo,
+ unsigned int size)
{
- struct amdgpu_sa_bo *sa_bo, *tmp;
+ struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
+ GFP_KERNEL, true, 0);
- if (sa_manager->hole->next == &sa_manager->olist)
- return;
+ if (IS_ERR(sa)) {
+ *sa_bo = NULL;
- sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
- list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
- if (sa_bo->fence == NULL ||
- !dma_fence_is_signaled(sa_bo->fence)) {
- return;
- }
- amdgpu_sa_bo_remove_locked(sa_bo);
+ return PTR_ERR(sa);
}
-}
-static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
-{
- struct list_head *hole = sa_manager->hole;
-
- if (hole != &sa_manager->olist) {
- return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
- }
+ *sa_bo = sa;
return 0;
}
-static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
-{
- struct list_head *hole = sa_manager->hole;
-
- if (hole->next != &sa_manager->olist) {
- return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
- }
- return sa_manager->size;
-}
-
-static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_sa_bo *sa_bo,
- unsigned size, unsigned align)
-{
- unsigned soffset, eoffset, wasted;
-
- soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
- eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
- wasted = (align - (soffset % align)) % align;
-
- if ((eoffset - soffset) >= (size + wasted)) {
- soffset += wasted;
-
- sa_bo->manager = sa_manager;
- sa_bo->soffset = soffset;
- sa_bo->eoffset = soffset + size;
- list_add(&sa_bo->olist, sa_manager->hole);
- INIT_LIST_HEAD(&sa_bo->flist);
- sa_manager->hole = &sa_bo->olist;
- return true;
- }
- return false;
-}
-
-/**
- * amdgpu_sa_event - Check if we can stop waiting
- *
- * @sa_manager: pointer to the sa_manager
- * @size: number of bytes we want to allocate
- * @align: alignment we need to match
- *
- * Check if either there is a fence we can wait for or
- * enough free memory to satisfy the allocation directly
- */
-static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
- unsigned size, unsigned align)
-{
- unsigned soffset, eoffset, wasted;
- int i;
-
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
- if (!list_empty(&sa_manager->flist[i]))
- return true;
-
- soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
- eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
- wasted = (align - (soffset % align)) % align;
-
- if ((eoffset - soffset) >= (size + wasted)) {
- return true;
- }
-
- return false;
-}
-
-static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
- struct dma_fence **fences,
- unsigned *tries)
-{
- struct amdgpu_sa_bo *best_bo = NULL;
- unsigned i, soffset, best, tmp;
-
- /* if hole points to the end of the buffer */
- if (sa_manager->hole->next == &sa_manager->olist) {
- /* try again with its beginning */
- sa_manager->hole = &sa_manager->olist;
- return true;
- }
-
- soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
- /* to handle wrap around we add sa_manager->size */
- best = sa_manager->size * 2;
- /* go over all fence list and try to find the closest sa_bo
- * of the current last
- */
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
- struct amdgpu_sa_bo *sa_bo;
-
- fences[i] = NULL;
-
- if (list_empty(&sa_manager->flist[i]))
- continue;
-
- sa_bo = list_first_entry(&sa_manager->flist[i],
- struct amdgpu_sa_bo, flist);
-
- if (!dma_fence_is_signaled(sa_bo->fence)) {
- fences[i] = sa_bo->fence;
- continue;
- }
-
- /* limit the number of tries each ring gets */
- if (tries[i] > 2) {
- continue;
- }
-
- tmp = sa_bo->soffset;
- if (tmp < soffset) {
- /* wrap around, pretend it's after */
- tmp += sa_manager->size;
- }
- tmp -= soffset;
- if (tmp < best) {
- /* this sa bo is the closest one */
- best = tmp;
- best_bo = sa_bo;
- }
- }
-
- if (best_bo) {
- uint32_t idx = best_bo->fence->context;
-
- idx %= AMDGPU_SA_NUM_FENCE_LISTS;
- ++tries[idx];
- sa_manager->hole = best_bo->olist.prev;
-
- /* we knew that this one is signaled,
- so it's save to remote it */
- amdgpu_sa_bo_remove_locked(best_bo);
- return true;
- }
- return false;
-}
-
-int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
- struct amdgpu_sa_bo **sa_bo,
- unsigned size, unsigned align)
-{
- struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
- unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
- unsigned count;
- int i, r;
- signed long t;
-
- if (WARN_ON_ONCE(align > sa_manager->align))
- return -EINVAL;
-
- if (WARN_ON_ONCE(size > sa_manager->size))
- return -EINVAL;
-
- *sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
- if (!(*sa_bo))
- return -ENOMEM;
- (*sa_bo)->manager = sa_manager;
- (*sa_bo)->fence = NULL;
- INIT_LIST_HEAD(&(*sa_bo)->olist);
- INIT_LIST_HEAD(&(*sa_bo)->flist);
-
- spin_lock(&sa_manager->wq.lock);
- do {
- for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
- tries[i] = 0;
-
- do {
- amdgpu_sa_bo_try_free(sa_manager);
-
- if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
- size, align)) {
- spin_unlock(&sa_manager->wq.lock);
- return 0;
- }
-
- /* see if we can skip over some allocations */
- } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
-
- for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
- if (fences[i])
- fences[count++] = dma_fence_get(fences[i]);
-
- if (count) {
- spin_unlock(&sa_manager->wq.lock);
- t = dma_fence_wait_any_timeout(fences, count, false,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
- for (i = 0; i < count; ++i)
- dma_fence_put(fences[i]);
-
- r = (t > 0) ? 0 : t;
- spin_lock(&sa_manager->wq.lock);
- } else {
- /* if we have nothing to wait for block */
- r = wait_event_interruptible_locked(
- sa_manager->wq,
- amdgpu_sa_event(sa_manager, size, align)
- );
- }
-
- } while (!r);
-
- spin_unlock(&sa_manager->wq.lock);
- kfree(*sa_bo);
- *sa_bo = NULL;
- return r;
-}
-
-void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
+void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct drm_suballoc **sa_bo,
struct dma_fence *fence)
{
- struct amdgpu_sa_manager *sa_manager;
-
if (sa_bo == NULL || *sa_bo == NULL) {
return;
}
- sa_manager = (*sa_bo)->manager;
- spin_lock(&sa_manager->wq.lock);
- if (fence && !dma_fence_is_signaled(fence)) {
- uint32_t idx;
-
- (*sa_bo)->fence = dma_fence_get(fence);
- idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
- list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
- } else {
- amdgpu_sa_bo_remove_locked(*sa_bo);
- }
- wake_up_all_locked(&sa_manager->wq);
- spin_unlock(&sa_manager->wq.lock);
+ drm_suballoc_free(*sa_bo, fence);
*sa_bo = NULL;
}
@@ -373,26 +109,8 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m)
{
- struct amdgpu_sa_bo *i;
-
- spin_lock(&sa_manager->wq.lock);
- list_for_each_entry(i, &sa_manager->olist, olist) {
- uint64_t soffset = i->soffset + sa_manager->gpu_addr;
- uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
- if (&i->olist == sa_manager->hole) {
- seq_printf(m, ">");
- } else {
- seq_printf(m, " ");
- }
- seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
- soffset, eoffset, eoffset - soffset);
+ struct drm_printer p = drm_seq_file_printer(m);
- if (i->fence)
- seq_printf(m, " protected by 0x%016llx on context %llu",
- i->fence->seqno, i->fence->context);
-
- seq_printf(m, "\n");
- }
- spin_unlock(&sa_manager->wq.lock);
+ drm_suballoc_dump_debug_info(&sa_manager->base, &p, sa_manager->gpu_addr);
}
#endif
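
With the open-coded allocator gone, amdgpu_sa.c reduces to a thin wrapper around
the shared DRM suballocation helper introduced by this series (drm_suballoc.c
and drm_suballoc.h in the diffstat). Below is a minimal usage sketch based only
on the calls visible in this patch; the GFP_KERNEL/true/0 arguments to
drm_suballoc_new() mirror the call site above, and reading them as gfp flags, an
interruptible wait, and the manager's default alignment is an assumption here:

	#include <linux/dma-fence.h>
	#include <linux/err.h>
	#include <drm/drm_suballoc.h>

	/* Sketch: lifecycle of the generic suballocator as amdgpu now uses it. */
	static int suballoc_example(struct drm_suballoc_manager *mgr,
				    struct dma_fence *done_fence)
	{
		struct drm_suballoc *sa;

		/* carve a 1 KiB block out of the managed range */
		sa = drm_suballoc_new(mgr, 1024, GFP_KERNEL, true, 0);
		if (IS_ERR(sa))
			return PTR_ERR(sa);

		/* drm_suballoc_soffset() yields the block's start offset, which
		 * amdgpu adds to the backing BO's GPU/CPU base addresses */
		pr_info("allocated at offset %llu\n",
			(unsigned long long)drm_suballoc_soffset(sa));

		/* the block is returned to the pool once done_fence signals */
		drm_suballoc_free(sa, done_fence);
		return 0;
	}

The manager itself follows the pattern visible in amdgpu_sa_bo_manager_init()
and _fini() above: drm_suballoc_manager_init(&mgr, size, align) over a pinned
kernel BO, and drm_suballoc_manager_fini(&mgr) once every block has been
reclaimed.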
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index c5ef7f7bdc15..2cd081cbf706 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -466,11 +466,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
}
- /* Can't move a pinned BO */
abo = ttm_to_amdgpu_bo(bo);
- if (WARN_ON_ONCE(abo->tbo.pin_count > 0))
- return -EINVAL;
-
adev = amdgpu_ttm_adev(bo->bdev);
if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 589c1c66a6dc..cf040e2e9efe 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -649,7 +649,7 @@ static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
struct drm_device *drm = dev_get_drvdata(dev);
struct malidp_drm *malidp = drm_to_malidp(drm);
- return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id);
+ return sysfs_emit(buf, "%08x\n", malidp->core_id);
}
static DEVICE_ATTR_RO(core_id);
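
The malidp hunk above follows the kernel-wide move from snprintf() to
sysfs_emit() in sysfs show() callbacks: sysfs_emit() bounds output to PAGE_SIZE
and warns if the buffer is not the page-aligned one sysfs handed in, which a
bare snprintf(buf, PAGE_SIZE, ...) cannot check. A minimal sketch of the
pattern (the "example" names are placeholders, not part of this patch):

	#include <linux/device.h>
	#include <linux/sysfs.h>

	/* show() callback using sysfs_emit(), bound to a read-only attribute */
	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%08x\n", 0x12345678);
	}
	static DEVICE_ATTR_RO(example);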
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 56483860306b..fbb070f63e36 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -9,7 +9,7 @@
int ast_astdp_read_edid(struct drm_device *dev, u8 *ediddata)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 i = 0, j = 0;
/*
@@ -125,7 +125,7 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower)
u8 bDPTX = 0;
u8 bDPExecute = 1;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
// S3 come back, need more time to wait BMC ready.
if (bPower)
WaitCount = 300;
@@ -172,7 +172,7 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower)
void ast_dp_power_on_off(struct drm_device *dev, bool on)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
// Read the DP PHY sleep bit, then clear it below
u8 bE3 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE3, AST_DP_VIDEO_ENABLE);
@@ -188,7 +188,7 @@ void ast_dp_power_on_off(struct drm_device *dev, bool on)
void ast_dp_set_on_off(struct drm_device *dev, bool on)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 video_on_off = on;
// Video On/Off
@@ -208,7 +208,7 @@ void ast_dp_set_on_off(struct drm_device *dev, bool on)
void ast_dp_set_mode(struct drm_crtc *crtc, struct ast_vbios_mode_info *vbios_mode)
{
- struct ast_private *ast = to_ast_private(crtc->dev);
+ struct ast_device *ast = to_ast_device(crtc->dev);
u32 ulRefreshRateIndex;
u8 ModeIdx;
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 4f75a9efb610..1bc35a992369 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -10,7 +10,7 @@ MODULE_FIRMWARE("ast_dp501_fw.bin");
static void ast_release_firmware(void *data)
{
- struct ast_private *ast = data;
+ struct ast_device *ast = data;
release_firmware(ast->dp501_fw);
ast->dp501_fw = NULL;
@@ -18,7 +18,7 @@ static void ast_release_firmware(void *data)
static int ast_load_dp501_microcode(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
int ret;
ret = request_firmware(&ast->dp501_fw, "ast_dp501_fw.bin", dev->dev);
@@ -28,7 +28,7 @@ static int ast_load_dp501_microcode(struct drm_device *dev)
return devm_add_action_or_reset(dev->dev, ast_release_firmware, ast);
}
-static void send_ack(struct ast_private *ast)
+static void send_ack(struct ast_device *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
@@ -36,7 +36,7 @@ static void send_ack(struct ast_private *ast)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
}
-static void send_nack(struct ast_private *ast)
+static void send_nack(struct ast_device *ast)
{
u8 sendack;
sendack = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0xff);
@@ -44,7 +44,7 @@ static void send_nack(struct ast_private *ast)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, 0x00, sendack);
}
-static bool wait_ack(struct ast_private *ast)
+static bool wait_ack(struct ast_device *ast)
{
u8 waitack;
u32 retry = 0;
@@ -60,7 +60,7 @@ static bool wait_ack(struct ast_private *ast)
return false;
}
-static bool wait_nack(struct ast_private *ast)
+static bool wait_nack(struct ast_device *ast)
{
u8 waitack;
u32 retry = 0;
@@ -76,18 +76,18 @@ static bool wait_nack(struct ast_private *ast)
return false;
}
-static void set_cmd_trigger(struct ast_private *ast)
+static void set_cmd_trigger(struct ast_device *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x40);
}
-static void clear_cmd_trigger(struct ast_private *ast)
+static void clear_cmd_trigger(struct ast_device *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9b, ~0x40, 0x00);
}
#if 0
-static bool wait_fw_ready(struct ast_private *ast)
+static bool wait_fw_ready(struct ast_device *ast)
{
u8 waitready;
u32 retry = 0;
@@ -106,7 +106,7 @@ static bool wait_fw_ready(struct ast_private *ast)
static bool ast_write_cmd(struct drm_device *dev, u8 data)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
int retry = 0;
if (wait_nack(ast)) {
send_nack(ast);
@@ -128,7 +128,7 @@ static bool ast_write_cmd(struct drm_device *dev, u8 data)
static bool ast_write_data(struct drm_device *dev, u8 data)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
if (wait_nack(ast)) {
send_nack(ast);
@@ -146,7 +146,7 @@ static bool ast_write_data(struct drm_device *dev, u8 data)
#if 0
static bool ast_read_data(struct drm_device *dev, u8 *data)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 tmp;
*data = 0;
@@ -163,7 +163,7 @@ static bool ast_read_data(struct drm_device *dev, u8 *data)
return true;
}
-static void clear_cmd(struct ast_private *ast)
+static void clear_cmd(struct ast_device *ast)
{
send_nack(ast);
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x9a, 0x00, 0x00);
@@ -178,14 +178,14 @@ void ast_set_dp501_video_output(struct drm_device *dev, u8 mode)
msleep(10);
}
-static u32 get_fw_base(struct ast_private *ast)
+static u32 get_fw_base(struct ast_device *ast)
{
return ast_mindwm(ast, 0x1e6e2104) & 0x7fffffff;
}
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u32 i, data;
u32 boot_address;
@@ -204,7 +204,7 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
static bool ast_launch_m68k(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u32 i, data, len = 0;
u32 boot_address;
u8 *fw_addr = NULL;
@@ -274,7 +274,7 @@ static bool ast_launch_m68k(struct drm_device *dev)
bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u32 i, boot_address, offset, data;
u32 *pEDIDidx;
@@ -334,7 +334,7 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
static bool ast_init_dvo(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 jreg;
u32 data;
ast_write32(ast, 0xf004, 0x1e6e0000);
@@ -407,7 +407,7 @@ static bool ast_init_dvo(struct drm_device *dev)
static void ast_init_analog(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u32 data;
/*
@@ -434,7 +434,7 @@ static void ast_init_analog(struct drm_device *dev)
void ast_init_3rdtx(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 jreg;
if (ast->chip == AST2300 || ast->chip == AST2400) {
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index d78852c7cf5b..3a7af6d5aa79 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -105,7 +105,7 @@ static int ast_remove_conflicting_framebuffers(struct pci_dev *pdev)
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct ast_private *ast;
+ struct ast_device *ast;
struct drm_device *dev;
int ret;
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index d51b81fea9c8..a501169cddad 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -157,7 +157,7 @@ to_ast_sil164_connector(struct drm_connector *connector)
* Device
*/
-struct ast_private {
+struct ast_device {
struct drm_device base;
struct mutex ioregs_lock; /* Protects access to I/O registers in ioregs */
@@ -210,14 +210,14 @@ struct ast_private {
const struct firmware *dp501_fw; /* dp501 fw */
};
-static inline struct ast_private *to_ast_private(struct drm_device *dev)
+static inline struct ast_device *to_ast_device(struct drm_device *dev)
{
- return container_of(dev, struct ast_private, base);
+ return container_of(dev, struct ast_device, base);
}
-struct ast_private *ast_device_create(const struct drm_driver *drv,
- struct pci_dev *pdev,
- unsigned long flags);
+struct ast_device *ast_device_create(const struct drm_driver *drv,
+ struct pci_dev *pdev,
+ unsigned long flags);
#define AST_IO_AR_PORT_WRITE (0x40)
#define AST_IO_MISC_PORT_WRITE (0x42)
@@ -238,62 +238,44 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
-#define __ast_read(x) \
-static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
-u##x val = 0;\
-val = ioread##x(ast->regs + reg); \
-return val;\
+static inline u32 ast_read32(struct ast_device *ast, u32 reg)
+{
+ return ioread32(ast->regs + reg);
}
-__ast_read(8);
-__ast_read(16);
-__ast_read(32)
-
-#define __ast_io_read(x) \
-static inline u##x ast_io_read##x(struct ast_private *ast, u32 reg) { \
-u##x val = 0;\
-val = ioread##x(ast->ioregs + reg); \
-return val;\
+static inline void ast_write32(struct ast_device *ast, u32 reg, u32 val)
+{
+ iowrite32(val, ast->regs + reg);
}
-__ast_io_read(8);
-__ast_io_read(16);
-__ast_io_read(32);
-
-#define __ast_write(x) \
-static inline void ast_write##x(struct ast_private *ast, u32 reg, u##x val) {\
- iowrite##x(val, ast->regs + reg);\
- }
-
-__ast_write(8);
-__ast_write(16);
-__ast_write(32);
-
-#define __ast_io_write(x) \
-static inline void ast_io_write##x(struct ast_private *ast, u32 reg, u##x val) {\
- iowrite##x(val, ast->ioregs + reg);\
- }
+static inline u8 ast_io_read8(struct ast_device *ast, u32 reg)
+{
+ return ioread8(ast->ioregs + reg);
+}
-__ast_io_write(8);
-__ast_io_write(16);
-#undef __ast_io_write
+static inline void ast_io_write8(struct ast_device *ast, u32 reg, u8 val)
+{
+ iowrite8(val, ast->ioregs + reg);
+}
-static inline void ast_set_index_reg(struct ast_private *ast,
+static inline void ast_set_index_reg(struct ast_device *ast,
uint32_t base, uint8_t index,
uint8_t val)
{
- ast_io_write16(ast, base, ((u16)val << 8) | index);
+ ast_io_write8(ast, base, index);
+ ++base;
+ ast_io_write8(ast, base, val);
}
-void ast_set_index_reg_mask(struct ast_private *ast,
+void ast_set_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index,
uint8_t mask, uint8_t val);
-uint8_t ast_get_index_reg(struct ast_private *ast,
+uint8_t ast_get_index_reg(struct ast_device *ast,
uint32_t base, uint8_t index);
-uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+uint8_t ast_get_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index, uint8_t mask);
-static inline void ast_open_key(struct ast_private *ast)
+static inline void ast_open_key(struct ast_device *ast)
{
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
}
@@ -352,7 +334,7 @@ struct ast_crtc_state {
#define to_ast_crtc_state(state) container_of(state, struct ast_crtc_state, base)
-int ast_mode_config_init(struct ast_private *ast);
+int ast_mode_config_init(struct ast_device *ast);
#define AST_MM_ALIGN_SHIFT 4
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
@@ -476,16 +458,16 @@ int ast_mode_config_init(struct ast_private *ast);
#define ASTDP_1366x768_60 0x1E
#define ASTDP_1152x864_75 0x1F
-int ast_mm_init(struct ast_private *ast);
+int ast_mm_init(struct ast_device *ast);
/* ast post */
void ast_enable_vga(struct drm_device *dev);
void ast_enable_mmio(struct drm_device *dev);
bool ast_is_vga_enabled(struct drm_device *dev);
void ast_post_gpu(struct drm_device *dev);
-u32 ast_mindwm(struct ast_private *ast, u32 r);
-void ast_moutdwm(struct ast_private *ast, u32 r, u32 v);
-void ast_patch_ahb_2500(struct ast_private *ast);
+u32 ast_mindwm(struct ast_device *ast, u32 r);
+void ast_moutdwm(struct ast_device *ast, u32 r, u32 v);
+void ast_patch_ahb_2500(struct ast_device *ast);
/* ast dp501 */
void ast_set_dp501_video_output(struct drm_device *dev, u8 mode);
bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size);
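
Besides the ast_private to ast_device rename, the header drops the macro-generated MMIO accessors in favour of the explicit 32-bit and 8-bit helpers actually used, and ast_set_index_reg() now issues two byte writes instead of one 16-bit write. The underlying VGA convention is an index/data port pair; a hypothetical read-modify-write sketch, assuming the ast_io_read8()/ast_io_write8() helpers above and that the index stays latched between accesses:

/* Illustrative helper: update bits of an indexed VGA register. */
static void example_update_index_reg(struct ast_device *ast, u32 base,
				     u8 index, u8 mask, u8 val)
{
	u8 tmp;

	ast_io_write8(ast, base, index);          /* select register */
	tmp = ast_io_read8(ast, base + 1) & mask; /* data port = base + 1 */
	ast_io_write8(ast, base + 1, tmp | val);
}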
diff --git a/drivers/gpu/drm/ast/ast_i2c.c b/drivers/gpu/drm/ast/ast_i2c.c
index 93e91c36d649..d64045c0b849 100644
--- a/drivers/gpu/drm/ast/ast_i2c.c
+++ b/drivers/gpu/drm/ast/ast_i2c.c
@@ -29,7 +29,7 @@
static void ast_i2c_setsda(void *i2c_priv, int data)
{
struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
+ struct ast_device *ast = to_ast_device(i2c->dev);
int i;
u8 ujcrb7, jtemp;
@@ -45,7 +45,7 @@ static void ast_i2c_setsda(void *i2c_priv, int data)
static void ast_i2c_setscl(void *i2c_priv, int clock)
{
struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
+ struct ast_device *ast = to_ast_device(i2c->dev);
int i;
u8 ujcrb7, jtemp;
@@ -61,7 +61,7 @@ static void ast_i2c_setscl(void *i2c_priv, int clock)
static int ast_i2c_getsda(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
+ struct ast_device *ast = to_ast_device(i2c->dev);
uint32_t val, val2, count, pass;
count = 0;
@@ -83,7 +83,7 @@ static int ast_i2c_getsda(void *i2c_priv)
static int ast_i2c_getscl(void *i2c_priv)
{
struct ast_i2c_chan *i2c = i2c_priv;
- struct ast_private *ast = to_ast_private(i2c->dev);
+ struct ast_device *ast = to_ast_device(i2c->dev);
uint32_t val, val2, count, pass;
count = 0;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f83ce77127cb..794ffd4a29c5 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -35,7 +35,7 @@
#include "ast_drv.h"
-void ast_set_index_reg_mask(struct ast_private *ast,
+void ast_set_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index,
uint8_t mask, uint8_t val)
{
@@ -45,7 +45,7 @@ void ast_set_index_reg_mask(struct ast_private *ast,
ast_set_index_reg(ast, base, index, tmp);
}
-uint8_t ast_get_index_reg(struct ast_private *ast,
+uint8_t ast_get_index_reg(struct ast_device *ast,
uint32_t base, uint8_t index)
{
uint8_t ret;
@@ -54,7 +54,7 @@ uint8_t ast_get_index_reg(struct ast_private *ast,
return ret;
}
-uint8_t ast_get_index_reg_mask(struct ast_private *ast,
+uint8_t ast_get_index_reg_mask(struct ast_device *ast,
uint32_t base, uint8_t index, uint8_t mask)
{
uint8_t ret;
@@ -66,7 +66,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
{
struct device_node *np = dev->dev->of_node;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t data, jregd0, jregd1;
@@ -122,7 +122,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
static int ast_detect_chip(struct drm_device *dev, bool *need_post)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
uint32_t jreg, scu_rev;
@@ -271,7 +271,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
static int ast_get_dram_info(struct drm_device *dev)
{
struct device_node *np = dev->dev->of_node;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
uint32_t denum, num, div, ref_pll, dsel;
@@ -394,22 +394,22 @@ static int ast_get_dram_info(struct drm_device *dev)
*/
static void ast_device_release(void *data)
{
- struct ast_private *ast = data;
+ struct ast_device *ast = data;
/* enable standard VGA decode */
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x04);
}
-struct ast_private *ast_device_create(const struct drm_driver *drv,
- struct pci_dev *pdev,
- unsigned long flags)
+struct ast_device *ast_device_create(const struct drm_driver *drv,
+ struct pci_dev *pdev,
+ unsigned long flags)
{
struct drm_device *dev;
- struct ast_private *ast;
+ struct ast_device *ast;
bool need_post;
int ret = 0;
- ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_private, base);
+ ast = devm_drm_dev_alloc(&pdev->dev, drv, struct ast_device, base);
if (IS_ERR(ast))
return ast;
dev = &ast->base;
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 248284a4b3ff..e16af60deef9 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -33,7 +33,7 @@
#include "ast_drv.h"
-static u32 ast_get_vram_size(struct ast_private *ast)
+static u32 ast_get_vram_size(struct ast_device *ast)
{
u8 jreg;
u32 vram_size;
@@ -73,7 +73,7 @@ static u32 ast_get_vram_size(struct ast_private *ast)
return vram_size;
}
-int ast_mm_init(struct ast_private *ast)
+int ast_mm_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 984ec590a7e7..36374828f6c8 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -51,7 +51,7 @@
#define AST_LUT_SIZE 256
-static inline void ast_load_palette_index(struct ast_private *ast,
+static inline void ast_load_palette_index(struct ast_device *ast,
u8 index, u8 red, u8 green,
u8 blue)
{
@@ -65,7 +65,7 @@ static inline void ast_load_palette_index(struct ast_private *ast,
ast_io_read8(ast, AST_IO_SEQ_PORT);
}
-static void ast_crtc_set_gamma_linear(struct ast_private *ast,
+static void ast_crtc_set_gamma_linear(struct ast_device *ast,
const struct drm_format_info *format)
{
int i;
@@ -84,7 +84,7 @@ static void ast_crtc_set_gamma_linear(struct ast_private *ast,
}
}
-static void ast_crtc_set_gamma(struct ast_private *ast,
+static void ast_crtc_set_gamma(struct ast_device *ast,
const struct drm_format_info *format,
struct drm_color_lut *lut)
{
@@ -232,7 +232,7 @@ static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
return true;
}
-static void ast_set_vbios_color_reg(struct ast_private *ast,
+static void ast_set_vbios_color_reg(struct ast_device *ast,
const struct drm_format_info *format,
const struct ast_vbios_mode_info *vbios_mode)
{
@@ -263,7 +263,7 @@ static void ast_set_vbios_color_reg(struct ast_private *ast,
}
}
-static void ast_set_vbios_mode_reg(struct ast_private *ast,
+static void ast_set_vbios_mode_reg(struct ast_device *ast,
const struct drm_display_mode *adjusted_mode,
const struct ast_vbios_mode_info *vbios_mode)
{
@@ -287,7 +287,7 @@ static void ast_set_vbios_mode_reg(struct ast_private *ast,
}
}
-static void ast_set_std_reg(struct ast_private *ast,
+static void ast_set_std_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -335,7 +335,7 @@ static void ast_set_std_reg(struct ast_private *ast,
ast_set_index_reg(ast, AST_IO_GR_PORT, i, stdtable->gr[i]);
}
-static void ast_set_crtc_reg(struct ast_private *ast,
+static void ast_set_crtc_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -450,7 +450,7 @@ static void ast_set_crtc_reg(struct ast_private *ast,
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0x11, 0x7f, 0x80);
}
-static void ast_set_offset_reg(struct ast_private *ast,
+static void ast_set_offset_reg(struct ast_device *ast,
struct drm_framebuffer *fb)
{
u16 offset;
@@ -460,7 +460,7 @@ static void ast_set_offset_reg(struct ast_private *ast,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f);
}
-static void ast_set_dclk_reg(struct ast_private *ast,
+static void ast_set_dclk_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -478,7 +478,7 @@ static void ast_set_dclk_reg(struct ast_private *ast,
((clk_info->param3 & 0x3) << 4));
}
-static void ast_set_color_reg(struct ast_private *ast,
+static void ast_set_color_reg(struct ast_device *ast,
const struct drm_format_info *format)
{
u8 jregA0 = 0, jregA3 = 0, jregA8 = 0;
@@ -507,7 +507,7 @@ static void ast_set_color_reg(struct ast_private *ast,
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa8, 0xfd, jregA8);
}
-static void ast_set_crtthd_reg(struct ast_private *ast)
+static void ast_set_crtthd_reg(struct ast_device *ast)
{
/* Set Threshold */
if (ast->chip == AST2600) {
@@ -529,7 +529,7 @@ static void ast_set_crtthd_reg(struct ast_private *ast)
}
}
-static void ast_set_sync_reg(struct ast_private *ast,
+static void ast_set_sync_reg(struct ast_device *ast,
struct drm_display_mode *mode,
struct ast_vbios_mode_info *vbios_mode)
{
@@ -544,7 +544,7 @@ static void ast_set_sync_reg(struct ast_private *ast,
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, jreg);
}
-static void ast_set_start_address_crt1(struct ast_private *ast,
+static void ast_set_start_address_crt1(struct ast_device *ast,
unsigned int offset)
{
u32 addr;
@@ -556,7 +556,7 @@ static void ast_set_start_address_crt1(struct ast_private *ast,
}
-static void ast_wait_for_vretrace(struct ast_private *ast)
+static void ast_wait_for_vretrace(struct ast_device *ast)
{
unsigned long timeout = jiffies + HZ;
u8 vgair1;
@@ -645,7 +645,7 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_device *dev = plane->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
@@ -672,23 +672,34 @@ static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
/*
* Some BMCs stop scanning out the video signal after the driver
- * reprogrammed the offset or scanout address. This stalls display
- * output for several seconds and makes the display unusable.
- * Therefore only update the offset if it changes and reprogram the
- * address after enabling the plane.
+ * reprogrammed the offset. This stalls display output for several
+ * seconds and makes the display unusable. Therefore only update
+ * the offset if it changes.
*/
if (!old_fb || old_fb->pitches[0] != fb->pitches[0])
ast_set_offset_reg(ast, fb);
- if (!old_fb) {
- ast_set_start_address_crt1(ast, (u32)ast_plane->offset);
- ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
- }
+}
+
+static void ast_primary_plane_helper_atomic_enable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct ast_device *ast = to_ast_device(plane->dev);
+ struct ast_plane *ast_plane = to_ast_plane(plane);
+
+ /*
+ * Some BMCs stop scanning out the video signal after the driver
+ * reprogrammed the scanout address. This stalls display
+ * output for several seconds and makes the display unusable.
+ * Therefore only reprogram the address after enabling the plane.
+ */
+ ast_set_start_address_crt1(ast, (u32)ast_plane->offset);
+ ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
}
static void ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
- struct ast_private *ast = to_ast_private(plane->dev);
+ struct ast_device *ast = to_ast_device(plane->dev);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x20);
}
@@ -697,6 +708,7 @@ static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = ast_primary_plane_helper_atomic_check,
.atomic_update = ast_primary_plane_helper_atomic_update,
+ .atomic_enable = ast_primary_plane_helper_atomic_enable,
.atomic_disable = ast_primary_plane_helper_atomic_disable,
};
@@ -707,7 +719,7 @@ static const struct drm_plane_funcs ast_primary_plane_funcs = {
DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static int ast_primary_plane_init(struct ast_private *ast)
+static int ast_primary_plane_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct ast_plane *ast_primary_plane = &ast->primary_plane;
@@ -800,7 +812,7 @@ static void ast_update_cursor_image(u8 __iomem *dst, const u8 *src, int width, i
writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
}
-static void ast_set_cursor_base(struct ast_private *ast, u64 address)
+static void ast_set_cursor_base(struct ast_device *ast, u64 address)
{
u8 addr0 = (address >> 3) & 0xff;
u8 addr1 = (address >> 11) & 0xff;
@@ -811,7 +823,7 @@ static void ast_set_cursor_base(struct ast_private *ast, u64 address)
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
}
-static void ast_set_cursor_location(struct ast_private *ast, u16 x, u16 y,
+static void ast_set_cursor_location(struct ast_device *ast, u16 x, u16 y,
u8 x_offset, u8 y_offset)
{
u8 x0 = (x & 0x00ff);
@@ -827,7 +839,7 @@ static void ast_set_cursor_location(struct ast_private *ast, u16 x, u16 y,
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1);
}
-static void ast_set_cursor_enabled(struct ast_private *ast, bool enabled)
+static void ast_set_cursor_enabled(struct ast_device *ast, bool enabled)
{
static const u8 mask = (u8)~(AST_IO_VGACRCB_HWC_16BPP |
AST_IO_VGACRCB_HWC_ENABLED);
@@ -876,7 +888,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
- struct ast_private *ast = to_ast_private(plane->dev);
+ struct ast_device *ast = to_ast_device(plane->dev);
struct iosys_map src_map = shadow_plane_state->data[0];
struct drm_rect damage;
const u8 *src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
@@ -931,7 +943,7 @@ static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
static void ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
- struct ast_private *ast = to_ast_private(plane->dev);
+ struct ast_device *ast = to_ast_device(plane->dev);
ast_set_cursor_enabled(ast, false);
}
@@ -950,7 +962,7 @@ static const struct drm_plane_funcs ast_cursor_plane_funcs = {
DRM_GEM_SHADOW_PLANE_FUNCS,
};
-static int ast_cursor_plane_init(struct ast_private *ast)
+static int ast_cursor_plane_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct ast_plane *ast_cursor_plane = &ast->cursor_plane;
@@ -995,7 +1007,7 @@ static int ast_cursor_plane_init(struct ast_private *ast)
static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
{
- struct ast_private *ast = to_ast_private(crtc->dev);
+ struct ast_device *ast = to_ast_device(crtc->dev);
u8 ch = AST_DPMS_VSYNC_OFF | AST_DPMS_HSYNC_OFF;
struct ast_crtc_state *ast_state;
const struct drm_format_info *format;
@@ -1052,7 +1064,7 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
static enum drm_mode_status
ast_crtc_helper_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode)
{
- struct ast_private *ast = to_ast_private(crtc->dev);
+ struct ast_device *ast = to_ast_device(crtc->dev);
enum drm_mode_status status;
uint32_t jtemp;
@@ -1177,7 +1189,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct drm_device *dev = crtc->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
@@ -1202,7 +1214,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info =
@@ -1224,7 +1236,7 @@ static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_ato
{
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
struct drm_device *dev = crtc->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
ast_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
@@ -1312,7 +1324,7 @@ static const struct drm_crtc_funcs ast_crtc_funcs = {
static int ast_crtc_init(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct drm_crtc *crtc = &ast->crtc;
int ret;
@@ -1338,7 +1350,7 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
{
struct ast_vga_connector *ast_vga_connector = to_ast_vga_connector(connector);
struct drm_device *dev = connector->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct edid *edid;
int count;
@@ -1411,7 +1423,7 @@ static int ast_vga_connector_init(struct drm_device *dev,
return 0;
}
-static int ast_vga_output_init(struct ast_private *ast)
+static int ast_vga_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1444,7 +1456,7 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector
{
struct ast_sil164_connector *ast_sil164_connector = to_ast_sil164_connector(connector);
struct drm_device *dev = connector->dev;
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct edid *edid;
int count;
@@ -1517,7 +1529,7 @@ static int ast_sil164_connector_init(struct drm_device *dev,
return 0;
}
-static int ast_sil164_output_init(struct ast_private *ast)
+static int ast_sil164_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1604,7 +1616,7 @@ static int ast_dp501_connector_init(struct drm_device *dev, struct drm_connector
return 0;
}
-static int ast_dp501_output_init(struct ast_private *ast)
+static int ast_dp501_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1691,7 +1703,7 @@ static int ast_astdp_connector_init(struct drm_device *dev, struct drm_connector
return 0;
}
-static int ast_astdp_output_init(struct ast_private *ast)
+static int ast_astdp_output_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
struct drm_crtc *crtc = &ast->crtc;
@@ -1721,7 +1733,7 @@ static int ast_astdp_output_init(struct ast_private *ast)
static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *state)
{
- struct ast_private *ast = to_ast_private(state->dev);
+ struct ast_device *ast = to_ast_device(state->dev);
/*
* Concurrent operations could possibly trigger a call to
@@ -1742,7 +1754,7 @@ static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
unsigned long fbsize, fbpages, max_fbpages;
max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT;
@@ -1763,7 +1775,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-int ast_mode_config_init(struct ast_private *ast)
+int ast_mode_config_init(struct ast_device *ast)
{
struct drm_device *dev = &ast->base;
int ret;
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 82fd3c8adee1..71bb36b865fd 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -39,7 +39,7 @@ static void ast_post_chip_2500(struct drm_device *dev);
void ast_enable_vga(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
ast_io_write8(ast, AST_IO_VGA_ENABLE_PORT, 0x01);
ast_io_write8(ast, AST_IO_MISC_PORT_WRITE, 0x01);
@@ -47,7 +47,7 @@ void ast_enable_vga(struct drm_device *dev)
void ast_enable_mmio(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xa1, 0x06);
}
@@ -55,7 +55,7 @@ void ast_enable_mmio(struct drm_device *dev)
bool ast_is_vga_enabled(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 ch;
ch = ast_io_read8(ast, AST_IO_VGA_ENABLE_PORT);
@@ -70,7 +70,7 @@ static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff };
static void
ast_set_def_ext_reg(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 i, index, reg;
const u8 *ext_reg_info;
@@ -110,7 +110,7 @@ ast_set_def_ext_reg(struct drm_device *dev)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xff, reg);
}
-u32 ast_mindwm(struct ast_private *ast, u32 r)
+u32 ast_mindwm(struct ast_device *ast, u32 r)
{
uint32_t data;
@@ -123,7 +123,7 @@ u32 ast_mindwm(struct ast_private *ast, u32 r)
return ast_read32(ast, 0x10000 + (r & 0x0000ffff));
}
-void ast_moutdwm(struct ast_private *ast, u32 r, u32 v)
+void ast_moutdwm(struct ast_device *ast, u32 r, u32 v)
{
uint32_t data;
ast_write32(ast, 0xf004, r & 0xffff0000);
@@ -162,7 +162,7 @@ static const u32 pattern_AST2150[14] = {
0x20F050E0
};
-static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
+static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen)
{
u32 data, timeout;
@@ -192,7 +192,7 @@ static u32 mmctestburst2_ast2150(struct ast_private *ast, u32 datagen)
}
#if 0 /* unused in DDX driver - here for completeness */
-static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
+static u32 mmctestsingle2_ast2150(struct ast_device *ast, u32 datagen)
{
u32 data, timeout;
@@ -212,7 +212,7 @@ static u32 mmctestsingle2_ast2150(struct ast_private *ast, u32 datagen)
}
#endif
-static int cbrtest_ast2150(struct ast_private *ast)
+static int cbrtest_ast2150(struct ast_device *ast)
{
int i;
@@ -222,7 +222,7 @@ static int cbrtest_ast2150(struct ast_private *ast)
return 1;
}
-static int cbrscan_ast2150(struct ast_private *ast, int busw)
+static int cbrscan_ast2150(struct ast_device *ast, int busw)
{
u32 patcnt, loop;
@@ -239,7 +239,7 @@ static int cbrscan_ast2150(struct ast_private *ast, int busw)
}
-static void cbrdlli_ast2150(struct ast_private *ast, int busw)
+static void cbrdlli_ast2150(struct ast_device *ast, int busw)
{
u32 dll_min[4], dll_max[4], dlli, data, passcnt;
@@ -273,7 +273,7 @@ cbr_start:
static void ast_init_dram_reg(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u8 j;
u32 data, temp, i;
const struct ast_dramstruct *dram_reg_info;
@@ -366,7 +366,7 @@ static void ast_init_dram_reg(struct drm_device *dev)
void ast_post_gpu(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 reg;
@@ -449,7 +449,7 @@ static const u32 pattern[8] = {
0x7C61D253
};
-static bool mmc_test(struct ast_private *ast, u32 datagen, u8 test_ctl)
+static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl)
{
u32 data, timeout;
@@ -469,7 +469,7 @@ static bool mmc_test(struct ast_private *ast, u32 datagen, u8 test_ctl)
return true;
}
-static u32 mmc_test2(struct ast_private *ast, u32 datagen, u8 test_ctl)
+static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl)
{
u32 data, timeout;
@@ -490,32 +490,32 @@ static u32 mmc_test2(struct ast_private *ast, u32 datagen, u8 test_ctl)
}
-static bool mmc_test_burst(struct ast_private *ast, u32 datagen)
+static bool mmc_test_burst(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0xc1);
}
-static u32 mmc_test_burst2(struct ast_private *ast, u32 datagen)
+static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen)
{
return mmc_test2(ast, datagen, 0x41);
}
-static bool mmc_test_single(struct ast_private *ast, u32 datagen)
+static bool mmc_test_single(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0xc5);
}
-static u32 mmc_test_single2(struct ast_private *ast, u32 datagen)
+static u32 mmc_test_single2(struct ast_device *ast, u32 datagen)
{
return mmc_test2(ast, datagen, 0x05);
}
-static bool mmc_test_single_2500(struct ast_private *ast, u32 datagen)
+static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen)
{
return mmc_test(ast, datagen, 0x85);
}
-static int cbr_test(struct ast_private *ast)
+static int cbr_test(struct ast_device *ast)
{
u32 data;
int i;
@@ -534,7 +534,7 @@ static int cbr_test(struct ast_private *ast)
return 1;
}
-static int cbr_scan(struct ast_private *ast)
+static int cbr_scan(struct ast_device *ast)
{
u32 data, data2, patcnt, loop;
@@ -555,7 +555,7 @@ static int cbr_scan(struct ast_private *ast)
return data2;
}
-static u32 cbr_test2(struct ast_private *ast)
+static u32 cbr_test2(struct ast_device *ast)
{
u32 data;
@@ -569,7 +569,7 @@ static u32 cbr_test2(struct ast_private *ast)
return ~data & 0xffff;
}
-static u32 cbr_scan2(struct ast_private *ast)
+static u32 cbr_scan2(struct ast_device *ast)
{
u32 data, data2, patcnt, loop;
@@ -590,7 +590,7 @@ static u32 cbr_scan2(struct ast_private *ast)
return data2;
}
-static bool cbr_test3(struct ast_private *ast)
+static bool cbr_test3(struct ast_device *ast)
{
if (!mmc_test_burst(ast, 0))
return false;
@@ -599,7 +599,7 @@ static bool cbr_test3(struct ast_private *ast)
return true;
}
-static bool cbr_scan3(struct ast_private *ast)
+static bool cbr_scan3(struct ast_device *ast)
{
u32 patcnt, loop;
@@ -615,7 +615,7 @@ static bool cbr_scan3(struct ast_private *ast)
return true;
}
-static bool finetuneDQI_L(struct ast_private *ast, struct ast2300_dram_param *param)
+static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0;
bool status = false;
@@ -714,7 +714,7 @@ FINETUNE_DONE:
return status;
} /* finetuneDQI_L */
-static void finetuneDQSI(struct ast_private *ast)
+static void finetuneDQSI(struct ast_device *ast)
{
u32 dlli, dqsip, dqidly;
u32 reg_mcr18, reg_mcr0c, passcnt[2], diff;
@@ -804,7 +804,7 @@ static void finetuneDQSI(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E0018, reg_mcr18);
}
-static bool cbr_dll2(struct ast_private *ast, struct ast2300_dram_param *param)
+static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0;
bool status = false;
@@ -860,7 +860,7 @@ CBR_DONE2:
return status;
} /* CBRDLL2 */
-static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *param)
+static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 trap, trap_AC2, trap_MRS;
@@ -1102,7 +1102,7 @@ static void get_ddr3_info(struct ast_private *ast, struct ast2300_dram_param *pa
}
-static void ddr3_init(struct ast_private *ast, struct ast2300_dram_param *param)
+static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 data, data2, retry = 0;
@@ -1225,7 +1225,7 @@ ddr3_init_start:
}
-static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *param)
+static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 trap, trap_AC2, trap_MRS;
@@ -1472,7 +1472,7 @@ static void get_ddr2_info(struct ast_private *ast, struct ast2300_dram_param *pa
}
}
-static void ddr2_init(struct ast_private *ast, struct ast2300_dram_param *param)
+static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param)
{
u32 data, data2, retry = 0;
@@ -1600,7 +1600,7 @@ ddr2_init_start:
static void ast_post_chip_2300(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
struct ast2300_dram_param param;
u32 temp;
u8 reg;
@@ -1681,7 +1681,7 @@ static void ast_post_chip_2300(struct drm_device *dev)
} while ((reg & 0x40) == 0);
}
-static bool cbr_test_2500(struct ast_private *ast)
+static bool cbr_test_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
@@ -1692,7 +1692,7 @@ static bool cbr_test_2500(struct ast_private *ast)
return true;
}
-static bool ddr_test_2500(struct ast_private *ast)
+static bool ddr_test_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF);
ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00);
@@ -1709,7 +1709,7 @@ static bool ddr_test_2500(struct ast_private *ast)
return true;
}
-static void ddr_init_common_2500(struct ast_private *ast)
+static void ddr_init_common_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E6E0034, 0x00020080);
ast_moutdwm(ast, 0x1E6E0008, 0x2003000F);
@@ -1732,7 +1732,7 @@ static void ddr_init_common_2500(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E024C, 0x80808080);
}
-static void ddr_phy_init_2500(struct ast_private *ast)
+static void ddr_phy_init_2500(struct ast_device *ast)
{
u32 data, pass, timecnt;
@@ -1766,7 +1766,7 @@ static void ddr_phy_init_2500(struct ast_private *ast)
* 4Gb : 0x80000000 ~ 0x9FFFFFFF
* 8Gb : 0x80000000 ~ 0xBFFFFFFF
*/
-static void check_dram_size_2500(struct ast_private *ast, u32 tRFC)
+static void check_dram_size_2500(struct ast_device *ast, u32 tRFC)
{
u32 reg_04, reg_14;
@@ -1797,7 +1797,7 @@ static void check_dram_size_2500(struct ast_private *ast, u32 tRFC)
ast_moutdwm(ast, 0x1E6E0014, reg_14);
}
-static void enable_cache_2500(struct ast_private *ast)
+static void enable_cache_2500(struct ast_device *ast)
{
u32 reg_04, data;
@@ -1810,7 +1810,7 @@ static void enable_cache_2500(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400);
}
-static void set_mpll_2500(struct ast_private *ast)
+static void set_mpll_2500(struct ast_device *ast)
{
u32 addr, data, param;
@@ -1837,7 +1837,7 @@ static void set_mpll_2500(struct ast_private *ast)
udelay(100);
}
-static void reset_mmc_2500(struct ast_private *ast)
+static void reset_mmc_2500(struct ast_device *ast)
{
ast_moutdwm(ast, 0x1E78505C, 0x00000004);
ast_moutdwm(ast, 0x1E785044, 0x00000001);
@@ -1848,7 +1848,7 @@ static void reset_mmc_2500(struct ast_private *ast)
ast_moutdwm(ast, 0x1E6E0000, 0xFC600309);
}
-static void ddr3_init_2500(struct ast_private *ast, const u32 *ddr_table)
+static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table)
{
ast_moutdwm(ast, 0x1E6E0004, 0x00000303);
@@ -1892,7 +1892,7 @@ static void ddr3_init_2500(struct ast_private *ast, const u32 *ddr_table)
ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
}
-static void ddr4_init_2500(struct ast_private *ast, const u32 *ddr_table)
+static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table)
{
u32 data, data2, pass, retrycnt;
u32 ddr_vref, phy_vref;
@@ -2002,7 +2002,7 @@ static void ddr4_init_2500(struct ast_private *ast, const u32 *ddr_table)
ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00);
}
-static bool ast_dram_init_2500(struct ast_private *ast)
+static bool ast_dram_init_2500(struct ast_device *ast)
{
u32 data;
u32 max_tries = 5;
@@ -2030,7 +2030,7 @@ static bool ast_dram_init_2500(struct ast_private *ast)
return true;
}
-void ast_patch_ahb_2500(struct ast_private *ast)
+void ast_patch_ahb_2500(struct ast_device *ast)
{
u32 data;
@@ -2066,7 +2066,7 @@ void ast_patch_ahb_2500(struct ast_private *ast)
void ast_post_chip_2500(struct drm_device *dev)
{
- struct ast_private *ast = to_ast_private(dev);
+ struct ast_device *ast = to_ast_device(dev);
u32 temp;
u8 reg;
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 8b2226f72b24..12e8f30c65f7 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -326,7 +326,7 @@ config DRM_TI_DLPC3433
input that produces a DMD output in RGB565, RGB666, RGB888
formats.
- It supports upto 720p resolution with 60 and 120 Hz refresh
+ It supports up to 720p resolution with 60 and 120 Hz refresh
rates.
config DRM_TI_TFP410
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index e8aae3cdc73d..d4b112911a99 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -81,6 +81,8 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
return ret;
}
+ drm_panel_bridge_set_orientation(connector, bridge);
+
drm_connector_attach_encoder(&panel_bridge->connector,
bridge->encoder);
diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c
index 0b6a28436885..77f7f7f54757 100644
--- a/drivers/gpu/drm/bridge/tc358762.c
+++ b/drivers/gpu/drm/bridge/tc358762.c
@@ -229,6 +229,7 @@ static int tc358762_probe(struct mipi_dsi_device *dsi)
ctx->bridge.funcs = &tc358762_bridge_funcs;
ctx->bridge.type = DRM_MODE_CONNECTOR_DPI;
ctx->bridge.of_node = dev->of_node;
+ ctx->bridge.pre_enable_prev_first = true;
drm_bridge_add(&ctx->bridge);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index d579fd8f7cb8..8606876f7233 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -2702,6 +2702,11 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
funcs->atomic_disable(plane, old_state);
} else if (new_plane_state->crtc || disabling) {
funcs->atomic_update(plane, old_state);
+
+ if (!disabling && funcs->atomic_enable) {
+ if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
+ funcs->atomic_enable(plane, old_state);
+ }
}
}
@@ -2762,6 +2767,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
struct drm_plane_state *new_plane_state =
drm_atomic_get_new_plane_state(old_state, plane);
const struct drm_plane_helper_funcs *plane_funcs;
+ bool disabling;
plane_funcs = plane->helper_private;
@@ -2771,12 +2777,18 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
WARN_ON(new_plane_state->crtc &&
new_plane_state->crtc != crtc);
- if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
- plane_funcs->atomic_disable)
+ disabling = drm_atomic_plane_disabling(old_plane_state, new_plane_state);
+
+ if (disabling && plane_funcs->atomic_disable) {
plane_funcs->atomic_disable(plane, old_state);
- else if (new_plane_state->crtc ||
- drm_atomic_plane_disabling(old_plane_state, new_plane_state))
+ } else if (new_plane_state->crtc || disabling) {
plane_funcs->atomic_update(plane, old_state);
+
+ if (!disabling && plane_funcs->atomic_enable) {
+ if (drm_atomic_plane_enabling(old_plane_state, new_plane_state))
+ plane_funcs->atomic_enable(plane, old_state);
+ }
+ }
}
if (crtc_funcs && crtc_funcs->atomic_flush)
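
Both commit paths now call the new plane atomic_enable hook right after atomic_update, but only on the transition from not scanning out to scanning out, as reported by drm_atomic_plane_enabling(). This lets drivers such as ast (above) split one-time enable programming from per-commit updates. A sketch with hypothetical callbacks:

#include <drm/drm_modeset_helper_vtables.h>

static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	/* runs on every commit that keeps the plane enabled */
}

static void example_plane_atomic_enable(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	/* runs only when the plane transitions to enabled */
}

static const struct drm_plane_helper_funcs example_plane_helper_funcs = {
	.atomic_update = example_plane_atomic_update,
	.atomic_enable = example_plane_atomic_enable,
};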
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 9d0250c28e9b..4b12c7a39ee3 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -33,9 +33,11 @@
#include <drm/drm_sysfs.h>
#include <drm/drm_utils.h>
-#include <linux/fb.h>
+#include <linux/property.h>
#include <linux/uaccess.h>
+#include <video/cmdline.h>
+
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -154,9 +156,10 @@ EXPORT_SYMBOL(drm_get_connector_type_name);
static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
{
struct drm_cmdline_mode *mode = &connector->cmdline_mode;
- char *option = NULL;
+ const char *option;
- if (fb_get_options(connector->name, &option))
+ option = video_get_options(connector->name);
+ if (!option)
return;
if (!drm_mode_parse_command_line_for_connector(option,
@@ -1446,6 +1449,20 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
 * a firmware-handled hotkey. Therefore userspace must not include the
* privacy-screen sw-state in an atomic commit unless it wants to change
* its value.
+ *
+ * left margin, right margin, top margin, bottom margin:
+ * Add margins to the connector's viewport. This is typically used to
+ * mitigate underscan on TVs.
+ *
+ * The value is the size in pixels of the black border which will be
+ * added. The attached CRTC's content will be scaled to fill the whole
+ * area inside the margin.
+ *
+ * The margins configuration might be sent to the sink, e.g. via HDMI AVI
+ * InfoFrames.
+ *
+ * Drivers can set up these properties by calling
+ * drm_mode_create_tv_margin_properties().
*/
int drm_connector_create_standard_properties(struct drm_device *dev)
@@ -1590,10 +1607,6 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property);
/*
* TODO: Document the properties:
- * - left margin
- * - right margin
- * - top margin
- * - bottom margin
* - brightness
* - contrast
* - flicker reduction
@@ -1602,7 +1615,6 @@ EXPORT_SYMBOL(drm_connector_attach_dp_subconnector_property);
* - overscan
* - saturation
* - select subconnector
- * - subconnector
*/
/**
* DOC: Analog TV Connector Properties
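
The margin documentation moved out of the TODO list and now names the setup helper. A sketch of the driver-side calls it refers to, assuming the existing drm_mode_create_tv_margin_properties() and drm_connector_attach_tv_margin_properties() helpers:

#include <drm/drm_connector.h>

static int example_attach_margins(struct drm_device *dev,
				  struct drm_connector *connector)
{
	int ret;

	/* Once per device: create the four margin properties. */
	ret = drm_mode_create_tv_margin_properties(dev);
	if (ret)
		return ret;

	/* Per connector: expose the properties to userspace. */
	drm_connector_attach_tv_margin_properties(connector);

	return 0;
}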
diff --git a/drivers/gpu/drm/drm_displayid.c b/drivers/gpu/drm/drm_displayid.c
index 38ea8203df45..9edc111be7ee 100644
--- a/drivers/gpu/drm/drm_displayid.c
+++ b/drivers/gpu/drm/drm_displayid.c
@@ -7,13 +7,29 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
-static int validate_displayid(const u8 *displayid, int length, int idx)
+static const struct displayid_header *
+displayid_get_header(const u8 *displayid, int length, int index)
+{
+ const struct displayid_header *base;
+
+ if (sizeof(*base) > length - index)
+ return ERR_PTR(-EINVAL);
+
+ base = (const struct displayid_header *)&displayid[index];
+
+ return base;
+}
+
+static const struct displayid_header *
+validate_displayid(const u8 *displayid, int length, int idx)
{
int i, dispid_length;
u8 csum = 0;
const struct displayid_header *base;
- base = (const struct displayid_header *)&displayid[idx];
+ base = displayid_get_header(displayid, length, idx);
+ if (IS_ERR(base))
+ return base;
DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
base->rev, base->bytes, base->prod_id, base->ext_count);
@@ -21,16 +37,16 @@ static int validate_displayid(const u8 *displayid, int length, int idx)
/* +1 for DispID checksum */
dispid_length = sizeof(*base) + base->bytes + 1;
if (dispid_length > length - idx)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
for (i = 0; i < dispid_length; i++)
csum += displayid[idx + i];
if (csum) {
DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
- return 0;
+ return base;
}
static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
@@ -39,7 +55,6 @@ static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
{
const u8 *displayid = drm_find_edid_extension(drm_edid, DISPLAYID_EXT, ext_index);
const struct displayid_header *base;
- int ret;
if (!displayid)
return NULL;
@@ -48,11 +63,10 @@ static const u8 *drm_find_displayid_extension(const struct drm_edid *drm_edid,
*length = EDID_LENGTH - 1;
*idx = 1;
- ret = validate_displayid(displayid, *length, *idx);
- if (ret)
+ base = validate_displayid(displayid, *length, *idx);
+ if (IS_ERR(base))
return NULL;
- base = (const struct displayid_header *)&displayid[*idx];
*length = *idx + sizeof(*base) + base->bytes;
return displayid;
@@ -109,6 +123,9 @@ __displayid_iter_next(struct displayid_iter *iter)
}
for (;;) {
+ /* The first section we encounter is the base section */
+ bool base_section = !iter->section;
+
iter->section = drm_find_displayid_extension(iter->drm_edid,
&iter->length,
&iter->idx,
@@ -118,6 +135,18 @@ __displayid_iter_next(struct displayid_iter *iter)
return NULL;
}
+ /* Save the structure version and primary use case. */
+ if (base_section) {
+ const struct displayid_header *base;
+
+ base = displayid_get_header(iter->section, iter->length,
+ iter->idx);
+ if (!IS_ERR(base)) {
+ iter->version = base->rev;
+ iter->primary_use = base->prod_id;
+ }
+ }
+
iter->idx += sizeof(struct displayid_header);
block = displayid_iter_block(iter);
@@ -130,3 +159,18 @@ void displayid_iter_end(struct displayid_iter *iter)
{
memset(iter, 0, sizeof(*iter));
}
+
+/* DisplayID Structure Version/Revision from the Base Section. */
+u8 displayid_version(const struct displayid_iter *iter)
+{
+ return iter->version;
+}
+
+/*
+ * DisplayID Primary Use Case (2.0+) or Product Type Identifier (1.0-1.3) from
+ * the Base Section.
+ */
+u8 displayid_primary_use(const struct displayid_iter *iter)
+{
+ return iter->primary_use;
+}
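
The iterator now latches the structure version and primary use case from the base section, so block parsers can behave differently per DisplayID revision. A minimal usage sketch of the new accessors (the update_displayid_info() hunk in drm_edid.c below follows the same pattern; the iterator API is drm-core internal, declared in drm_displayid.h):

static void example_parse_base_section(const struct drm_edid *drm_edid)
{
	const struct displayid_block *block;
	struct displayid_iter iter;

	displayid_iter_edid_begin(drm_edid, &iter);
	displayid_iter_for_each(block, &iter) {
		/* valid once the first (base) section has been read */
		u8 ver = displayid_version(&iter);
		u8 use = displayid_primary_use(&iter);

		/* dispatch on ver/use here; base section only */
		(void)ver;
		(void)use;
		break;
	}
	displayid_iter_end(&iter);
}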
diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c
index ad17fa21cebb..70032bba1c97 100644
--- a/drivers/gpu/drm/drm_dumb_buffers.c
+++ b/drivers/gpu/drm/drm_dumb_buffers.c
@@ -139,10 +139,7 @@ int drm_mode_destroy_dumb(struct drm_device *dev, u32 handle,
if (!dev->driver->dumb_create)
return -ENOSYS;
- if (dev->driver->dumb_destroy)
- return dev->driver->dumb_destroy(file_priv, dev, handle);
- else
- return drm_gem_dumb_destroy(file_priv, dev, handle);
+ return drm_gem_handle_delete(file_priv, handle);
}
int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3d0a4da661bc..c18ec866678d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3424,10 +3424,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
connector->base.id, connector->name);
return NULL;
}
- if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Composite sync not supported\n",
- connector->base.id, connector->name);
- }
/* it is incorrect if hsync/vsync width is zero */
if (!hsync_pulse_width || !vsync_pulse_width) {
@@ -3474,10 +3470,27 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
} else {
- mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
- mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
- DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+ switch (pt->misc & DRM_EDID_PT_SYNC_MASK) {
+ case DRM_EDID_PT_ANALOG_CSYNC:
+ case DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC:
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Analog composite sync!\n",
+ connector->base.id, connector->name);
+ mode->flags |= DRM_MODE_FLAG_CSYNC | DRM_MODE_FLAG_NCSYNC;
+ break;
+ case DRM_EDID_PT_DIGITAL_CSYNC:
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Digital composite sync!\n",
+ connector->base.id, connector->name);
+ mode->flags |= DRM_MODE_FLAG_CSYNC;
+ mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PCSYNC : DRM_MODE_FLAG_NCSYNC;
+ break;
+ case DRM_EDID_PT_DIGITAL_SEPARATE_SYNC:
+ mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+ mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+ break;
+ }
}
set_size:
@@ -6433,6 +6446,29 @@ static void drm_reset_display_info(struct drm_connector *connector)
info->quirks = 0;
}
+static void update_displayid_info(struct drm_connector *connector,
+ const struct drm_edid *drm_edid)
+{
+ struct drm_display_info *info = &connector->display_info;
+ const struct displayid_block *block;
+ struct displayid_iter iter;
+
+ displayid_iter_edid_begin(drm_edid, &iter);
+ displayid_iter_for_each(block, &iter) {
+ if (displayid_version(&iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
+ (displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_VR ||
+ displayid_primary_use(&iter) == PRIMARY_USE_HEAD_MOUNTED_AR))
+ info->non_desktop = true;
+
+ /*
+ * We're only interested in the base section here, no need to
+ * iterate further.
+ */
+ break;
+ }
+ displayid_iter_end(&iter);
+}
+
static void update_display_info(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
@@ -6463,6 +6499,8 @@ static void update_display_info(struct drm_connector *connector,
info->color_formats |= DRM_COLOR_FORMAT_RGB444;
drm_parse_cea_ext(connector, drm_edid);
+ update_displayid_info(connector, drm_edid);
+
/*
* Digital sink with "DFP 1.x compliant TMDS" according to EDID 1.3?
*
@@ -7242,6 +7280,15 @@ static void drm_parse_tiled_block(struct drm_connector *connector,
}
}
+static bool displayid_is_tiled_block(const struct displayid_iter *iter,
+ const struct displayid_block *block)
+{
+ return (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_12 &&
+ block->tag == DATA_BLOCK_TILED_DISPLAY) ||
+ (displayid_version(iter) == DISPLAY_ID_STRUCTURE_VER_20 &&
+ block->tag == DATA_BLOCK_2_TILED_DISPLAY_TOPOLOGY);
+}
+
static void _drm_update_tile_info(struct drm_connector *connector,
const struct drm_edid *drm_edid)
{
@@ -7252,7 +7299,7 @@ static void _drm_update_tile_info(struct drm_connector *connector,
displayid_iter_edid_begin(drm_edid, &iter);
displayid_iter_for_each(block, &iter) {
- if (block->tag == DATA_BLOCK_TILED_DISPLAY)
+ if (displayid_is_tiled_block(&iter, block))
drm_parse_tiled_block(connector, block);
}
displayid_iter_end(&iter);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 7a3cb08dc942..ee3e11e7177d 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -336,13 +336,6 @@ out:
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
-int drm_gem_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- u32 handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
/**
* drm_gem_handle_create_tail - internal functions to create a handle
* @file_priv: drm file-private structure to register the handle for
@@ -1466,3 +1459,21 @@ tail:
return freed;
}
EXPORT_SYMBOL(drm_gem_lru_scan);
+
+/**
+ * drm_gem_evict - helper to evict backing pages for a GEM object
+ * @obj: obj in question
+ */
+int drm_gem_evict(struct drm_gem_object *obj)
+{
+ dma_resv_assert_held(obj->resv);
+
+ if (!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ))
+ return -EBUSY;
+
+ if (obj->funcs->evict)
+ return obj->funcs->evict(obj);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_evict);
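The new drm_gem_evict() helper only does work when the object's drm_gem_object_funcs wires up an evict callback. A minimal sketch of such a hook, assuming hypothetical my_gem_* helpers that are not part of this series:

static int my_gem_evict(struct drm_gem_object *obj)
{
	/*
	 * drm_gem_evict() holds the dma_resv lock and has already checked
	 * that all readers have signaled, so the backing pages can be
	 * dropped safely at this point.
	 */
	my_gem_release_backing_pages(to_my_gem_object(obj)); /* hypothetical */
	return 0;
}

static const struct drm_gem_object_funcs my_gem_funcs = {
	.evict = my_gem_evict,
	/* .free, .vmap, etc. as usual */
};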
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 75185a960fc4..9b0d540ff4a8 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -141,7 +141,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- WARN_ON(shmem->vmap_use_count);
+ drm_WARN_ON(obj->dev, shmem->vmap_use_count);
if (obj->import_attach) {
drm_prime_gem_destroy(obj, shmem->sgt);
@@ -156,7 +156,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
drm_gem_shmem_put_pages(shmem);
}
- WARN_ON(shmem->pages_use_count);
+ drm_WARN_ON(obj->dev, shmem->pages_use_count);
drm_gem_object_release(obj);
mutex_destroy(&shmem->pages_lock);
@@ -175,7 +175,8 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
pages = drm_gem_get_pages(obj);
if (IS_ERR(pages)) {
- DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
+ drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n",
+ PTR_ERR(pages));
shmem->pages_use_count = 0;
return PTR_ERR(pages);
}
@@ -207,9 +208,10 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
*/
int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
+ struct drm_gem_object *obj = &shmem->base;
int ret;
- WARN_ON(shmem->base.import_attach);
+ drm_WARN_ON(obj->dev, obj->import_attach);
ret = mutex_lock_interruptible(&shmem->pages_lock);
if (ret)
@@ -225,7 +227,7 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- if (WARN_ON_ONCE(!shmem->pages_use_count))
+ if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
return;
if (--shmem->pages_use_count > 0)
@@ -268,7 +270,9 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
*/
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem)
{
- WARN_ON(shmem->base.import_attach);
+ struct drm_gem_object *obj = &shmem->base;
+
+ drm_WARN_ON(obj->dev, obj->import_attach);
return drm_gem_shmem_get_pages(shmem);
}
@@ -283,7 +287,9 @@ EXPORT_SYMBOL(drm_gem_shmem_pin);
*/
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem)
{
- WARN_ON(shmem->base.import_attach);
+ struct drm_gem_object *obj = &shmem->base;
+
+ drm_WARN_ON(obj->dev, obj->import_attach);
drm_gem_shmem_put_pages(shmem);
}
@@ -295,24 +301,22 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
struct drm_gem_object *obj = &shmem->base;
int ret = 0;
- if (shmem->vmap_use_count++ > 0) {
- iosys_map_set_vaddr(map, shmem->vaddr);
- return 0;
- }
-
if (obj->import_attach) {
ret = dma_buf_vmap(obj->import_attach->dmabuf, map);
if (!ret) {
- if (WARN_ON(map->is_iomem)) {
+ if (drm_WARN_ON(obj->dev, map->is_iomem)) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
- ret = -EIO;
- goto err_put_pages;
+ return -EIO;
}
- shmem->vaddr = map->vaddr;
}
} else {
pgprot_t prot = PAGE_KERNEL;
+ if (shmem->vmap_use_count++ > 0) {
+ iosys_map_set_vaddr(map, shmem->vaddr);
+ return 0;
+ }
+
ret = drm_gem_shmem_get_pages(shmem);
if (ret)
goto err_zero_use;
@@ -328,7 +332,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
}
if (ret) {
- DRM_DEBUG_KMS("Failed to vmap pages, error %d\n", ret);
+ drm_dbg_kms(obj->dev, "Failed to vmap pages, error %d\n", ret);
goto err_put_pages;
}
@@ -378,15 +382,15 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
{
struct drm_gem_object *obj = &shmem->base;
- if (WARN_ON_ONCE(!shmem->vmap_use_count))
- return;
-
- if (--shmem->vmap_use_count > 0)
- return;
-
if (obj->import_attach) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
} else {
+ if (drm_WARN_ON_ONCE(obj->dev, !shmem->vmap_use_count))
+ return;
+
+ if (--shmem->vmap_use_count > 0)
+ return;
+
vunmap(shmem->vaddr);
drm_gem_shmem_put_pages(shmem);
}
@@ -461,7 +465,7 @@ void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev;
- WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
+ drm_WARN_ON(obj->dev, !drm_gem_shmem_is_purgeable(shmem));
dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
sg_free_table(shmem->sgt);
@@ -550,7 +554,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
mutex_lock(&shmem->pages_lock);
if (page_offset >= num_pages ||
- WARN_ON_ONCE(!shmem->pages) ||
+ drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
shmem->madv < 0) {
ret = VM_FAULT_SIGBUS;
} else {
@@ -569,7 +573,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
struct drm_gem_object *obj = vma->vm_private_data;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- WARN_ON(shmem->base.import_attach);
+ drm_WARN_ON(obj->dev, obj->import_attach);
mutex_lock(&shmem->pages_lock);
@@ -578,7 +582,7 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
* mmap'd, vm_open() just grabs an additional reference for the new
* mm the vma is getting copied into (ie. on fork()).
*/
- if (!WARN_ON_ONCE(!shmem->pages_use_count))
+ if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count))
shmem->pages_use_count++;
mutex_unlock(&shmem->pages_lock);
@@ -648,6 +652,9 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
struct drm_printer *p, unsigned int indent)
{
+ if (shmem->base.import_attach)
+ return;
+
drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
@@ -672,7 +679,7 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem)
{
struct drm_gem_object *obj = &shmem->base;
- WARN_ON(shmem->base.import_attach);
+ drm_WARN_ON(obj->dev, obj->import_attach);
return drm_prime_pages_to_sg(obj->dev, shmem->pages, obj->size >> PAGE_SHIFT);
}
@@ -687,7 +694,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
if (shmem->sgt)
return shmem->sgt;
- WARN_ON(obj->import_attach);
+ drm_WARN_ON(obj->dev, obj->import_attach);
ret = drm_gem_shmem_get_pages_locked(shmem);
if (ret)
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index d40b3edb52d0..0bea3df2a16d 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -916,6 +916,17 @@ static int bo_driver_move(struct ttm_buffer_object *bo,
{
struct drm_gem_vram_object *gbo;
+ if (!bo->resource) {
+ if (new_mem->mem_type != TTM_PL_SYSTEM) {
+ hop->mem_type = TTM_PL_SYSTEM;
+ hop->flags = TTM_PL_FLAG_TEMPORARY;
+ return -EMULTIHOP;
+ }
+
+ ttm_bo_move_null(bo, new_mem);
+ return 0;
+ }
+
gbo = drm_gem_vram_of_bo(bo);
return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index ed2103ee272c..d7e023bbb0d5 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -178,9 +178,6 @@ void drm_gem_unpin(struct drm_gem_object *obj);
int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map);
void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map);
-int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- u32 handle);
-
/* drm_debugfs.c drm_debugfs_crc.c */
#if defined(CONFIG_DEBUG_FS)
int drm_debugfs_init(struct drm_minor *minor, int minor_id,
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 40d482a01178..ac9a406250c5 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -2339,8 +2339,7 @@ static int drm_mode_parse_cmdline_named_mode(const char *name,
* @mode: preallocated drm_cmdline_mode structure to fill out
*
* This parses @mode_option command line modeline for modes and options to
- * configure the connector. If @mode_option is NULL the default command line
- * modeline in fb_mode_option will be parsed instead.
+ * configure the connector.
*
* This uses the same parameters as the fb modedb.c, except for an extra
* force-enable, force-enable-digital and force-disable bit at the end::
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 7bbcb999bb75..177b600895d3 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -10,6 +10,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -493,3 +494,53 @@ int drm_of_get_data_lanes_count_ep(const struct device_node *port,
return ret;
}
EXPORT_SYMBOL_GPL(drm_of_get_data_lanes_count_ep);
+
+#if IS_ENABLED(CONFIG_DRM_MIPI_DSI)
+
+/**
+ * drm_of_get_dsi_bus - find the DSI bus for a given device
+ * @dev: parent device of display (SPI, I2C)
+ *
+ * Gets parent DSI bus for a DSI device controlled through a bus other
+ * than MIPI-DCS (SPI, I2C, etc.) using the Device Tree.
+ *
+ * Returns a pointer to the mipi_dsi_host if successful, -ENODEV if no
+ * DSI host node could be found in the Device Tree, or -EPROBE_DEFER
+ * if the DSI host node was found but the host is not (yet) available.
+ */
+struct mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev)
+{
+ struct mipi_dsi_host *dsi_host;
+ struct device_node *endpoint, *dsi_host_node;
+
+ /*
+ * Get first endpoint child from device.
+ */
+ endpoint = of_graph_get_next_endpoint(dev->of_node, NULL);
+ if (!endpoint)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * Follow the first endpoint to get the DSI host node and then
+ * release the endpoint since we no longer need it.
+ */
+ dsi_host_node = of_graph_get_remote_port_parent(endpoint);
+ of_node_put(endpoint);
+ if (!dsi_host_node)
+ return ERR_PTR(-ENODEV);
+
+ /*
+ * Get the DSI host from the DSI host node. If we get an error
+ * or the return is null assume we're not ready to probe just
+ * yet. Release the DSI host node since we're done with it.
+ */
+ dsi_host = of_find_mipi_dsi_host_by_node(dsi_host_node);
+ of_node_put(dsi_host_node);
+ if (IS_ERR_OR_NULL(dsi_host))
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return dsi_host;
+}
+EXPORT_SYMBOL_GPL(drm_of_get_dsi_bus);
+
+#endif /* CONFIG_DRM_MIPI_DSI */
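A SPI- or I2C-controlled DSI panel driver would typically call drm_of_get_dsi_bus() from its probe routine before registering a DSI child device; the panel-magnachip-d53e6ea8966 driver added later in this series follows exactly this pattern. A minimal sketch, with variable names and the message text illustrative only:

	struct mipi_dsi_host *dsi_host;

	/* Locate the DSI host this externally-controlled panel sits on. */
	dsi_host = drm_of_get_dsi_bus(&spi->dev);
	if (IS_ERR(dsi_host))
		return dev_err_probe(&spi->dev, PTR_ERR(dsi_host),
				     "cannot find DSI host\n");

	/* ... then devm_mipi_dsi_device_register_full(dev, dsi_host, &info) */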
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 8127be134c39..2fb9bf901a2c 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -590,8 +590,9 @@ retry:
*/
dev->mode_config.delayed_event = true;
if (dev->mode_config.poll_enabled)
- schedule_delayed_work(&dev->mode_config.output_poll_work,
- 0);
+ mod_delayed_work(system_wq,
+ &dev->mode_config.output_poll_work,
+ 0);
}
/* Re-enable polling in case the global poll config changed. */
diff --git a/drivers/gpu/drm/drm_suballoc.c b/drivers/gpu/drm/drm_suballoc.c
new file mode 100644
index 000000000000..38cc7a123819
--- /dev/null
+++ b/drivers/gpu/drm/drm_suballoc.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2011 Red Hat Inc.
+ * Copyright 2023 Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/* Algorithm:
+ *
+ * We store the last allocated bo in "hole", and we always try to allocate
+ * after the last allocated bo. The principle is that in a linear GPU ring
+ * progression, what comes after last is the oldest bo we allocated and
+ * thus the first one that should no longer be in use by the GPU.
+ *
+ * If that is not the case we skip over the bo after last to the closest
+ * done bo if one exists. If none exists and we are not asked to
+ * block we report failure to allocate.
+ *
+ * If we are asked to block, we wait on the oldest fence of all
+ * rings. We just wait for any of those fences to complete.
+ */
+
+#include <drm/drm_suballoc.h>
+#include <drm/drm_print.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/dma-fence.h>
+
+static void drm_suballoc_remove_locked(struct drm_suballoc *sa);
+static void drm_suballoc_try_free(struct drm_suballoc_manager *sa_manager);
+
+/**
+ * drm_suballoc_manager_init() - Initialise the drm_suballoc_manager
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to suballocate
+ * @align: alignment for each suballocated chunk
+ *
+ * Prepares the suballocation manager for suballocations.
+ */
+void drm_suballoc_manager_init(struct drm_suballoc_manager *sa_manager,
+ size_t size, size_t align)
+{
+ unsigned int i;
+
+ BUILD_BUG_ON(!is_power_of_2(DRM_SUBALLOC_MAX_QUEUES));
+
+ if (!align)
+ align = 1;
+
+ /* alignment must be a power of 2 */
+ if (WARN_ON_ONCE(align & (align - 1)))
+ align = roundup_pow_of_two(align);
+
+ init_waitqueue_head(&sa_manager->wq);
+ sa_manager->size = size;
+ sa_manager->align = align;
+ sa_manager->hole = &sa_manager->olist;
+ INIT_LIST_HEAD(&sa_manager->olist);
+ for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
+ INIT_LIST_HEAD(&sa_manager->flist[i]);
+}
+EXPORT_SYMBOL(drm_suballoc_manager_init);
+
+/**
+ * drm_suballoc_manager_fini() - Destroy the drm_suballoc_manager
+ * @sa_manager: pointer to the sa_manager
+ *
+ * Cleans up the suballocation manager after use. All fences added
+ * with drm_suballoc_free() must be signaled, or we cannot clean up
+ * the entire manager.
+ */
+void drm_suballoc_manager_fini(struct drm_suballoc_manager *sa_manager)
+{
+ struct drm_suballoc *sa, *tmp;
+
+ if (!sa_manager->size)
+ return;
+
+ if (!list_empty(&sa_manager->olist)) {
+ sa_manager->hole = &sa_manager->olist;
+ drm_suballoc_try_free(sa_manager);
+ if (!list_empty(&sa_manager->olist))
+ DRM_ERROR("sa_manager is not empty, clearing anyway\n");
+ }
+ list_for_each_entry_safe(sa, tmp, &sa_manager->olist, olist) {
+ drm_suballoc_remove_locked(sa);
+ }
+
+ sa_manager->size = 0;
+}
+EXPORT_SYMBOL(drm_suballoc_manager_fini);
+
+static void drm_suballoc_remove_locked(struct drm_suballoc *sa)
+{
+ struct drm_suballoc_manager *sa_manager = sa->manager;
+
+ if (sa_manager->hole == &sa->olist)
+ sa_manager->hole = sa->olist.prev;
+
+ list_del_init(&sa->olist);
+ list_del_init(&sa->flist);
+ dma_fence_put(sa->fence);
+ kfree(sa);
+}
+
+static void drm_suballoc_try_free(struct drm_suballoc_manager *sa_manager)
+{
+ struct drm_suballoc *sa, *tmp;
+
+ if (sa_manager->hole->next == &sa_manager->olist)
+ return;
+
+ sa = list_entry(sa_manager->hole->next, struct drm_suballoc, olist);
+ list_for_each_entry_safe_from(sa, tmp, &sa_manager->olist, olist) {
+ if (!sa->fence || !dma_fence_is_signaled(sa->fence))
+ return;
+
+ drm_suballoc_remove_locked(sa);
+ }
+}
+
+static size_t drm_suballoc_hole_soffset(struct drm_suballoc_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole != &sa_manager->olist)
+ return list_entry(hole, struct drm_suballoc, olist)->eoffset;
+
+ return 0;
+}
+
+static size_t drm_suballoc_hole_eoffset(struct drm_suballoc_manager *sa_manager)
+{
+ struct list_head *hole = sa_manager->hole;
+
+ if (hole->next != &sa_manager->olist)
+ return list_entry(hole->next, struct drm_suballoc, olist)->soffset;
+ return sa_manager->size;
+}
+
+static bool drm_suballoc_try_alloc(struct drm_suballoc_manager *sa_manager,
+ struct drm_suballoc *sa,
+ size_t size, size_t align)
+{
+ size_t soffset, eoffset, wasted;
+
+ soffset = drm_suballoc_hole_soffset(sa_manager);
+ eoffset = drm_suballoc_hole_eoffset(sa_manager);
+ wasted = round_up(soffset, align) - soffset;
+
+ if ((eoffset - soffset) >= (size + wasted)) {
+ soffset += wasted;
+
+ sa->manager = sa_manager;
+ sa->soffset = soffset;
+ sa->eoffset = soffset + size;
+ list_add(&sa->olist, sa_manager->hole);
+ INIT_LIST_HEAD(&sa->flist);
+ sa_manager->hole = &sa->olist;
+ return true;
+ }
+ return false;
+}
+
+static bool __drm_suballoc_event(struct drm_suballoc_manager *sa_manager,
+ size_t size, size_t align)
+{
+ size_t soffset, eoffset, wasted;
+ unsigned int i;
+
+ for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
+ if (!list_empty(&sa_manager->flist[i]))
+ return true;
+
+ soffset = drm_suballoc_hole_soffset(sa_manager);
+ eoffset = drm_suballoc_hole_eoffset(sa_manager);
+ wasted = round_up(soffset, align) - soffset;
+
+ return ((eoffset - soffset) >= (size + wasted));
+}
+
+/**
+ * drm_suballoc_event() - Check if we can stop waiting
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to allocate
+ * @align: alignment we need to match
+ *
+ * Return: true if either there is a fence we can wait for or
+ * enough free memory to satisfy the allocation directly.
+ * false otherwise.
+ */
+static bool drm_suballoc_event(struct drm_suballoc_manager *sa_manager,
+ size_t size, size_t align)
+{
+ bool ret;
+
+ spin_lock(&sa_manager->wq.lock);
+ ret = __drm_suballoc_event(sa_manager, size, align);
+ spin_unlock(&sa_manager->wq.lock);
+ return ret;
+}
+
+static bool drm_suballoc_next_hole(struct drm_suballoc_manager *sa_manager,
+ struct dma_fence **fences,
+ unsigned int *tries)
+{
+ struct drm_suballoc *best_bo = NULL;
+ unsigned int i, best_idx;
+ size_t soffset, best, tmp;
+
+ /* if hole points to the end of the buffer */
+ if (sa_manager->hole->next == &sa_manager->olist) {
+ /* try again with its beginning */
+ sa_manager->hole = &sa_manager->olist;
+ return true;
+ }
+
+ soffset = drm_suballoc_hole_soffset(sa_manager);
+ /* to handle wrap around we add sa_manager->size */
+ best = sa_manager->size * 2;
+	/* go over all the fence lists and try to find the sa
+	 * closest to the current last
+	 */
+ for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i) {
+ struct drm_suballoc *sa;
+
+ fences[i] = NULL;
+
+ if (list_empty(&sa_manager->flist[i]))
+ continue;
+
+ sa = list_first_entry(&sa_manager->flist[i],
+ struct drm_suballoc, flist);
+
+ if (!dma_fence_is_signaled(sa->fence)) {
+ fences[i] = sa->fence;
+ continue;
+ }
+
+ /* limit the number of tries each freelist gets */
+ if (tries[i] > 2)
+ continue;
+
+ tmp = sa->soffset;
+ if (tmp < soffset) {
+ /* wrap around, pretend it's after */
+ tmp += sa_manager->size;
+ }
+ tmp -= soffset;
+ if (tmp < best) {
+ /* this sa bo is the closest one */
+ best = tmp;
+ best_idx = i;
+ best_bo = sa;
+ }
+ }
+
+ if (best_bo) {
+ ++tries[best_idx];
+ sa_manager->hole = best_bo->olist.prev;
+
+ /*
+ * We know that this one is signaled,
+ * so it's safe to remove it.
+ */
+ drm_suballoc_remove_locked(best_bo);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * drm_suballoc_new() - Make a suballocation.
+ * @sa_manager: pointer to the sa_manager
+ * @size: number of bytes we want to suballocate.
+ * @gfp: gfp flags used for memory allocation. Typically GFP_KERNEL but
+ * the argument is provided for suballocations from reclaim context or
+ * where the caller wants to avoid pipelining rather than wait for
+ * reclaim.
+ * @intr: Whether to perform waits interruptibly. This should typically
+ * always be true, unless the caller needs to propagate a
+ * non-interruptible context from above layers.
+ * @align: Alignment. Must not exceed the default manager alignment.
+ * If @align is zero, then the manager alignment is used.
+ *
+ * Try to make a suballocation of size @size, which will be rounded
+ * up to the alignment specified in drm_suballoc_manager_init().
+ *
+ * Return: a new suballocated bo, or an ERR_PTR.
+ */
+struct drm_suballoc *
+drm_suballoc_new(struct drm_suballoc_manager *sa_manager, size_t size,
+ gfp_t gfp, bool intr, size_t align)
+{
+ struct dma_fence *fences[DRM_SUBALLOC_MAX_QUEUES];
+ unsigned int tries[DRM_SUBALLOC_MAX_QUEUES];
+ unsigned int count;
+ int i, r;
+ struct drm_suballoc *sa;
+
+ if (WARN_ON_ONCE(align > sa_manager->align))
+ return ERR_PTR(-EINVAL);
+ if (WARN_ON_ONCE(size > sa_manager->size || !size))
+ return ERR_PTR(-EINVAL);
+
+ if (!align)
+ align = sa_manager->align;
+
+ sa = kmalloc(sizeof(*sa), gfp);
+ if (!sa)
+ return ERR_PTR(-ENOMEM);
+ sa->manager = sa_manager;
+ sa->fence = NULL;
+ INIT_LIST_HEAD(&sa->olist);
+ INIT_LIST_HEAD(&sa->flist);
+
+ spin_lock(&sa_manager->wq.lock);
+ do {
+ for (i = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
+ tries[i] = 0;
+
+ do {
+ drm_suballoc_try_free(sa_manager);
+
+ if (drm_suballoc_try_alloc(sa_manager, sa,
+ size, align)) {
+ spin_unlock(&sa_manager->wq.lock);
+ return sa;
+ }
+
+ /* see if we can skip over some allocations */
+ } while (drm_suballoc_next_hole(sa_manager, fences, tries));
+
+ for (i = 0, count = 0; i < DRM_SUBALLOC_MAX_QUEUES; ++i)
+ if (fences[i])
+ fences[count++] = dma_fence_get(fences[i]);
+
+ if (count) {
+ long t;
+
+ spin_unlock(&sa_manager->wq.lock);
+ t = dma_fence_wait_any_timeout(fences, count, intr,
+ MAX_SCHEDULE_TIMEOUT,
+ NULL);
+ for (i = 0; i < count; ++i)
+ dma_fence_put(fences[i]);
+
+ r = (t > 0) ? 0 : t;
+ spin_lock(&sa_manager->wq.lock);
+ } else if (intr) {
+			/* if we have nothing to wait for, block */
+ r = wait_event_interruptible_locked
+ (sa_manager->wq,
+ __drm_suballoc_event(sa_manager, size, align));
+ } else {
+ spin_unlock(&sa_manager->wq.lock);
+ wait_event(sa_manager->wq,
+ drm_suballoc_event(sa_manager, size, align));
+ r = 0;
+ spin_lock(&sa_manager->wq.lock);
+ }
+ } while (!r);
+
+ spin_unlock(&sa_manager->wq.lock);
+ kfree(sa);
+ return ERR_PTR(r);
+}
+EXPORT_SYMBOL(drm_suballoc_new);
+
+/**
+ * drm_suballoc_free - Free a suballocation
+ * @suballoc: pointer to the suballocation
+ * @fence: fence that signals when suballocation is idle
+ *
+ * Free the suballocation. The suballocation can be re-used after @fence signals.
+ */
+void drm_suballoc_free(struct drm_suballoc *suballoc,
+ struct dma_fence *fence)
+{
+ struct drm_suballoc_manager *sa_manager;
+
+ if (!suballoc)
+ return;
+
+ sa_manager = suballoc->manager;
+
+ spin_lock(&sa_manager->wq.lock);
+ if (fence && !dma_fence_is_signaled(fence)) {
+ u32 idx;
+
+ suballoc->fence = dma_fence_get(fence);
+ idx = fence->context & (DRM_SUBALLOC_MAX_QUEUES - 1);
+ list_add_tail(&suballoc->flist, &sa_manager->flist[idx]);
+ } else {
+ drm_suballoc_remove_locked(suballoc);
+ }
+ wake_up_all_locked(&sa_manager->wq);
+ spin_unlock(&sa_manager->wq.lock);
+}
+EXPORT_SYMBOL(drm_suballoc_free);
+
+#ifdef CONFIG_DEBUG_FS
+void drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager,
+ struct drm_printer *p,
+ unsigned long long suballoc_base)
+{
+ struct drm_suballoc *i;
+
+ spin_lock(&sa_manager->wq.lock);
+ list_for_each_entry(i, &sa_manager->olist, olist) {
+ unsigned long long soffset = i->soffset;
+ unsigned long long eoffset = i->eoffset;
+
+ if (&i->olist == sa_manager->hole)
+ drm_puts(p, ">");
+ else
+ drm_puts(p, " ");
+
+ drm_printf(p, "[0x%010llx 0x%010llx] size %8lld",
+ suballoc_base + soffset, suballoc_base + eoffset,
+ eoffset - soffset);
+
+ if (i->fence)
+ drm_printf(p, " protected by 0x%016llx on context %llu",
+ (unsigned long long)i->fence->seqno,
+ (unsigned long long)i->fence->context);
+
+ drm_puts(p, "\n");
+ }
+ spin_unlock(&sa_manager->wq.lock);
+}
+EXPORT_SYMBOL(drm_suballoc_dump_debug_info);
+#endif
+MODULE_AUTHOR("Multiple");
+MODULE_DESCRIPTION("Range suballocator helper");
+MODULE_LICENSE("Dual MIT/GPL");
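For orientation, the typical lifecycle of the new suballocator looks as follows. This is a hedged sketch only: the sizes, alignment, and the `fence` variable (the submission's dma_fence) are illustrative, and the offset is read from the returned struct drm_suballoc:

	struct drm_suballoc_manager mgr;
	struct drm_suballoc *sa;

	/* One-time setup: suballocate from, e.g., a 256 KiB ring buffer. */
	drm_suballoc_manager_init(&mgr, SZ_256K, 256);

	/* Per submission: carve out 4 KiB, blocking on fences when full. */
	sa = drm_suballoc_new(&mgr, SZ_4K, GFP_KERNEL, true, 0);
	if (IS_ERR(sa))
		return PTR_ERR(sa);

	/* ... emit commands at sa->soffset ... */

	/* Hand the range back; it becomes reusable once @fence signals. */
	drm_suballoc_free(sa, fence);

	/* Teardown, after all fences have signaled. */
	drm_suballoc_manager_fini(&mgr);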
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 7420276827a5..341b94672abc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -472,7 +472,7 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
struct ttm_placement place = {};
int ret;
- if (!bo->ttm || bo->resource->mem_type != TTM_PL_SYSTEM)
+ if (!bo->ttm || i915_ttm_cpu_maps_iomem(bo->resource))
return 0;
GEM_BUG_ON(!i915_tt->is_shmem);
@@ -511,7 +511,13 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- if (bo->resource && !i915_ttm_is_ghost_object(bo)) {
+	/*
+	 * This gets called twice by ttm, so as long as we have a ttm resource
+	 * or ttm_tt then we can still safely call this. Due to pipeline-gutting,
+	 * we may have a NULL bo->resource, but in that case we should always
+	 * have a ttm alive (such as when the pages are swapped out).
+	 */
+ if ((bo->resource || bo->ttm) && !i915_ttm_is_ghost_object(bo)) {
__i915_gem_object_pages_fini(obj);
i915_ttm_free_cached_io_rsgt(obj);
}
@@ -1067,11 +1073,12 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
.interruptible = true,
.no_wait_gpu = true, /* should be idle already */
};
+ int err;
GEM_BUG_ON(!bo->ttm || !(bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED));
- ret = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
- if (ret) {
+ err = ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+ if (err) {
dma_resv_unlock(bo->base.resv);
return VM_FAULT_SIGBUS;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 2a94a99ef76b..f8f6bed1b297 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -98,7 +98,7 @@ static inline bool i915_ttm_gtt_binds_lmem(struct ttm_resource *mem)
static inline bool i915_ttm_cpu_maps_iomem(struct ttm_resource *mem)
{
/* Once / if we support GGTT, this is also false for cached ttm_tts */
- return mem->mem_type != I915_PL_SYSTEM;
+ return mem && mem->mem_type != I915_PL_SYSTEM;
}
bool i915_ttm_resource_mappable(struct ttm_resource *res);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 76dd9e5e1a8b..d030182ca176 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -711,6 +711,10 @@ int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
assert_object_held(dst);
assert_object_held(src);
+
+ if (GEM_WARN_ON(!src_bo->resource || !dst_bo->resource))
+ return -EINVAL;
+
i915_deps_init(&deps, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
ret = dma_resv_reserve_fences(src_bo->base.resv, 1);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
index 7e67742bc65e..dfe39c8e74d8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -53,7 +53,7 @@ static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
unsigned int flags;
int err = 0;
- if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
+ if (!i915_ttm_cpu_maps_iomem(bo->resource) || obj->ttm.backup)
return 0;
if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
@@ -187,7 +187,10 @@ static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
return err;
/* Content may have been swapped. */
- err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+ if (!backup_bo->resource)
+ err = ttm_bo_validate(backup_bo, i915_ttm_sys_placement(), &ctx);
+ if (!err)
+ err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
if (!err) {
err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
false);
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 3c55ed003359..fcd532db19c1 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -866,10 +866,10 @@ meson_venc_hdmi_supported_mode(const struct drm_display_mode *mode)
DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))
return MODE_BAD;
- if (mode->hdisplay < 640 || mode->hdisplay > 1920)
+ if (mode->hdisplay < 400 || mode->hdisplay > 1920)
return MODE_BAD_HVALUE;
- if (mode->vdisplay < 480 || mode->vdisplay > 1200)
+ if (mode->vdisplay < 480 || mode->vdisplay > 1920)
return MODE_BAD_VVALUE;
return MODE_OK;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 9e604dbb8e44..57c7edcab602 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -375,12 +375,15 @@ int mgag200_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *new_state);
void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *old_state);
+void mgag200_primary_plane_helper_atomic_enable(struct drm_plane *plane,
+ struct drm_atomic_state *state);
void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *old_state);
#define MGAG200_PRIMARY_PLANE_HELPER_FUNCS \
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, \
.atomic_check = mgag200_primary_plane_helper_atomic_check, \
.atomic_update = mgag200_primary_plane_helper_atomic_update, \
+ .atomic_enable = mgag200_primary_plane_helper_atomic_enable, \
.atomic_disable = mgag200_primary_plane_helper_atomic_disable
#define MGAG200_PRIMARY_PLANE_FUNCS \
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 0a5aaf78172a..0f2dd26755df 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -501,10 +501,6 @@ void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_framebuffer *fb = plane_state->fb;
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
- u8 seq1;
-
- if (!fb)
- return;
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
@@ -514,13 +510,19 @@ void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane,
/* Always scanout image at VRAM offset 0 */
mgag200_set_startadd(mdev, (u32)0);
mgag200_set_offset(mdev, fb);
+}
- if (!old_plane_state->crtc && plane_state->crtc) { // enabling
- RREG_SEQ(0x01, seq1);
- seq1 &= ~MGAREG_SEQ1_SCROFF;
- WREG_SEQ(0x01, seq1);
- msleep(20);
- }
+void mgag200_primary_plane_helper_atomic_enable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ u8 seq1;
+
+ RREG_SEQ(0x01, seq1);
+ seq1 &= ~MGAREG_SEQ1_SCROFF;
+ WREG_SEQ(0x01, seq1);
+ msleep(20);
}
void mgag200_primary_plane_helper_atomic_disable(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 288eebc70a67..c2ec91cc845d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1015,9 +1015,6 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
if (ret)
goto out_ntfy;
- if (nvbo->bo.pin_count)
- NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
-
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index a7db7c31064b..e844be49e11e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -41,7 +41,7 @@ static ssize_t
nouveau_hwmon_show_temp1_auto_point1_pwm(struct device *d,
struct device_attribute *a, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", 100);
+ return sysfs_emit(buf, "%d\n", 100);
}
static SENSOR_DEVICE_ATTR(temp1_auto_point1_pwm, 0444,
nouveau_hwmon_show_temp1_auto_point1_pwm, NULL, 0);
@@ -54,8 +54,8 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
- return snprintf(buf, PAGE_SIZE, "%d\n",
- therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000);
+ return sysfs_emit(buf, "%d\n",
+ therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000);
}
static ssize_t
nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
@@ -87,8 +87,8 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
- return snprintf(buf, PAGE_SIZE, "%d\n",
- therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
+ return sysfs_emit(buf, "%d\n",
+ therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
}
static ssize_t
nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h
index 21a5775028cc..bc9bc7208da3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_led.h
+++ b/drivers/gpu/drm/nouveau/nouveau_led.h
@@ -27,7 +27,7 @@
#include "nouveau_drv.h"
-struct led_classdev;
+#include <linux/leds.h>
struct nouveau_led {
struct drm_device *dev;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 8eeee71c0000..29cf5fa39ff2 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -318,6 +318,17 @@ config DRM_PANEL_LG_LG4573
Say Y here if you want to enable support for LG4573 RGB panel.
To compile this driver as a module, choose M here.
+config DRM_PANEL_MAGNACHIP_D53E6EA8966
+ tristate "Magnachip D53E6EA8966 DSI panel"
+ depends on OF && SPI
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select DRM_MIPI_DBI
+ help
+	  DRM panel driver for the Samsung AMS495QA01 panel controlled
+	  with the Magnachip D53E6EA8966 panel IC. This panel receives
+	  video data via DSI but takes commands via 9-bit SPI using DBI.
+
config DRM_PANEL_NEC_NL8048HL11
tristate "NEC NL8048HL11 RGB panel"
depends on GPIOLIB && OF && SPI
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index c05aa9e23907..b3e8ba29edd3 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
+obj-$(CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966) += panel-magnachip-d53e6ea8966.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
obj-$(CONFIG_DRM_PANEL_NEWVISION_NV3051D) += panel-newvision-nv3051d.o
obj-$(CONFIG_DRM_PANEL_NEWVISION_NV3052C) += panel-newvision-nv3052c.o
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index 48c1702a863b..323c33c9c37a 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -167,6 +167,202 @@ static const struct drm_panel_funcs jadard_funcs = {
.get_modes = jadard_get_modes,
};
+static const struct jadard_init_cmd radxa_display_8hd_ad002_init_cmds[] = {
+ { .data = { 0xE0, 0x00 } },
+ { .data = { 0xE1, 0x93 } },
+ { .data = { 0xE2, 0x65 } },
+ { .data = { 0xE3, 0xF8 } },
+ { .data = { 0x80, 0x03 } },
+ { .data = { 0xE0, 0x01 } },
+ { .data = { 0x00, 0x00 } },
+ { .data = { 0x01, 0x7E } },
+ { .data = { 0x03, 0x00 } },
+ { .data = { 0x04, 0x65 } },
+ { .data = { 0x0C, 0x74 } },
+ { .data = { 0x17, 0x00 } },
+ { .data = { 0x18, 0xB7 } },
+ { .data = { 0x19, 0x00 } },
+ { .data = { 0x1A, 0x00 } },
+ { .data = { 0x1B, 0xB7 } },
+ { .data = { 0x1C, 0x00 } },
+ { .data = { 0x24, 0xFE } },
+ { .data = { 0x37, 0x19 } },
+ { .data = { 0x38, 0x05 } },
+ { .data = { 0x39, 0x00 } },
+ { .data = { 0x3A, 0x01 } },
+ { .data = { 0x3B, 0x01 } },
+ { .data = { 0x3C, 0x70 } },
+ { .data = { 0x3D, 0xFF } },
+ { .data = { 0x3E, 0xFF } },
+ { .data = { 0x3F, 0xFF } },
+ { .data = { 0x40, 0x06 } },
+ { .data = { 0x41, 0xA0 } },
+ { .data = { 0x43, 0x1E } },
+ { .data = { 0x44, 0x0F } },
+ { .data = { 0x45, 0x28 } },
+ { .data = { 0x4B, 0x04 } },
+ { .data = { 0x55, 0x02 } },
+ { .data = { 0x56, 0x01 } },
+ { .data = { 0x57, 0xA9 } },
+ { .data = { 0x58, 0x0A } },
+ { .data = { 0x59, 0x0A } },
+ { .data = { 0x5A, 0x37 } },
+ { .data = { 0x5B, 0x19 } },
+ { .data = { 0x5D, 0x78 } },
+ { .data = { 0x5E, 0x63 } },
+ { .data = { 0x5F, 0x54 } },
+ { .data = { 0x60, 0x49 } },
+ { .data = { 0x61, 0x45 } },
+ { .data = { 0x62, 0x38 } },
+ { .data = { 0x63, 0x3D } },
+ { .data = { 0x64, 0x28 } },
+ { .data = { 0x65, 0x43 } },
+ { .data = { 0x66, 0x41 } },
+ { .data = { 0x67, 0x43 } },
+ { .data = { 0x68, 0x62 } },
+ { .data = { 0x69, 0x50 } },
+ { .data = { 0x6A, 0x57 } },
+ { .data = { 0x6B, 0x49 } },
+ { .data = { 0x6C, 0x44 } },
+ { .data = { 0x6D, 0x37 } },
+ { .data = { 0x6E, 0x23 } },
+ { .data = { 0x6F, 0x10 } },
+ { .data = { 0x70, 0x78 } },
+ { .data = { 0x71, 0x63 } },
+ { .data = { 0x72, 0x54 } },
+ { .data = { 0x73, 0x49 } },
+ { .data = { 0x74, 0x45 } },
+ { .data = { 0x75, 0x38 } },
+ { .data = { 0x76, 0x3D } },
+ { .data = { 0x77, 0x28 } },
+ { .data = { 0x78, 0x43 } },
+ { .data = { 0x79, 0x41 } },
+ { .data = { 0x7A, 0x43 } },
+ { .data = { 0x7B, 0x62 } },
+ { .data = { 0x7C, 0x50 } },
+ { .data = { 0x7D, 0x57 } },
+ { .data = { 0x7E, 0x49 } },
+ { .data = { 0x7F, 0x44 } },
+ { .data = { 0x80, 0x37 } },
+ { .data = { 0x81, 0x23 } },
+ { .data = { 0x82, 0x10 } },
+ { .data = { 0xE0, 0x02 } },
+ { .data = { 0x00, 0x47 } },
+ { .data = { 0x01, 0x47 } },
+ { .data = { 0x02, 0x45 } },
+ { .data = { 0x03, 0x45 } },
+ { .data = { 0x04, 0x4B } },
+ { .data = { 0x05, 0x4B } },
+ { .data = { 0x06, 0x49 } },
+ { .data = { 0x07, 0x49 } },
+ { .data = { 0x08, 0x41 } },
+ { .data = { 0x09, 0x1F } },
+ { .data = { 0x0A, 0x1F } },
+ { .data = { 0x0B, 0x1F } },
+ { .data = { 0x0C, 0x1F } },
+ { .data = { 0x0D, 0x1F } },
+ { .data = { 0x0E, 0x1F } },
+ { .data = { 0x0F, 0x5F } },
+ { .data = { 0x10, 0x5F } },
+ { .data = { 0x11, 0x57 } },
+ { .data = { 0x12, 0x77 } },
+ { .data = { 0x13, 0x35 } },
+ { .data = { 0x14, 0x1F } },
+ { .data = { 0x15, 0x1F } },
+ { .data = { 0x16, 0x46 } },
+ { .data = { 0x17, 0x46 } },
+ { .data = { 0x18, 0x44 } },
+ { .data = { 0x19, 0x44 } },
+ { .data = { 0x1A, 0x4A } },
+ { .data = { 0x1B, 0x4A } },
+ { .data = { 0x1C, 0x48 } },
+ { .data = { 0x1D, 0x48 } },
+ { .data = { 0x1E, 0x40 } },
+ { .data = { 0x1F, 0x1F } },
+ { .data = { 0x20, 0x1F } },
+ { .data = { 0x21, 0x1F } },
+ { .data = { 0x22, 0x1F } },
+ { .data = { 0x23, 0x1F } },
+ { .data = { 0x24, 0x1F } },
+ { .data = { 0x25, 0x5F } },
+ { .data = { 0x26, 0x5F } },
+ { .data = { 0x27, 0x57 } },
+ { .data = { 0x28, 0x77 } },
+ { .data = { 0x29, 0x35 } },
+ { .data = { 0x2A, 0x1F } },
+ { .data = { 0x2B, 0x1F } },
+ { .data = { 0x58, 0x40 } },
+ { .data = { 0x59, 0x00 } },
+ { .data = { 0x5A, 0x00 } },
+ { .data = { 0x5B, 0x10 } },
+ { .data = { 0x5C, 0x06 } },
+ { .data = { 0x5D, 0x40 } },
+ { .data = { 0x5E, 0x01 } },
+ { .data = { 0x5F, 0x02 } },
+ { .data = { 0x60, 0x30 } },
+ { .data = { 0x61, 0x01 } },
+ { .data = { 0x62, 0x02 } },
+ { .data = { 0x63, 0x03 } },
+ { .data = { 0x64, 0x6B } },
+ { .data = { 0x65, 0x05 } },
+ { .data = { 0x66, 0x0C } },
+ { .data = { 0x67, 0x73 } },
+ { .data = { 0x68, 0x09 } },
+ { .data = { 0x69, 0x03 } },
+ { .data = { 0x6A, 0x56 } },
+ { .data = { 0x6B, 0x08 } },
+ { .data = { 0x6C, 0x00 } },
+ { .data = { 0x6D, 0x04 } },
+ { .data = { 0x6E, 0x04 } },
+ { .data = { 0x6F, 0x88 } },
+ { .data = { 0x70, 0x00 } },
+ { .data = { 0x71, 0x00 } },
+ { .data = { 0x72, 0x06 } },
+ { .data = { 0x73, 0x7B } },
+ { .data = { 0x74, 0x00 } },
+ { .data = { 0x75, 0xF8 } },
+ { .data = { 0x76, 0x00 } },
+ { .data = { 0x77, 0xD5 } },
+ { .data = { 0x78, 0x2E } },
+ { .data = { 0x79, 0x12 } },
+ { .data = { 0x7A, 0x03 } },
+ { .data = { 0x7B, 0x00 } },
+ { .data = { 0x7C, 0x00 } },
+ { .data = { 0x7D, 0x03 } },
+ { .data = { 0x7E, 0x7B } },
+ { .data = { 0xE0, 0x04 } },
+ { .data = { 0x00, 0x0E } },
+ { .data = { 0x02, 0xB3 } },
+ { .data = { 0x09, 0x60 } },
+ { .data = { 0x0E, 0x2A } },
+ { .data = { 0x36, 0x59 } },
+ { .data = { 0xE0, 0x00 } },
+};
+
+static const struct jadard_panel_desc radxa_display_8hd_ad002_desc = {
+ .mode = {
+ .clock = 70000,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 40,
+ .hsync_end = 800 + 40 + 18,
+ .htotal = 800 + 40 + 18 + 20,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 20,
+ .vsync_end = 1280 + 20 + 4,
+ .vtotal = 1280 + 20 + 4 + 20,
+
+ .width_mm = 127,
+ .height_mm = 199,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init_cmds = radxa_display_8hd_ad002_init_cmds,
+ .num_init_cmds = ARRAY_SIZE(radxa_display_8hd_ad002_init_cmds),
+};
+
static const struct jadard_init_cmd cz101b4001_init_cmds[] = {
{ .data = { 0xE0, 0x00 } },
{ .data = { 0xE1, 0x93 } },
@@ -452,7 +648,18 @@ static void jadard_dsi_remove(struct mipi_dsi_device *dsi)
}
static const struct of_device_id jadard_of_match[] = {
- { .compatible = "chongzhou,cz101b4001", .data = &cz101b4001_desc },
+ {
+ .compatible = "chongzhou,cz101b4001",
+ .data = &cz101b4001_desc
+ },
+ {
+ .compatible = "radxa,display-10hd-ad001",
+ .data = &cz101b4001_desc
+ },
+ {
+ .compatible = "radxa,display-8hd-ad002",
+ .data = &radxa_display_8hd_ad002_desc
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, jadard_of_match);
diff --git a/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
new file mode 100644
index 000000000000..8c362c40227f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-magnachip-d53e6ea8966.c
@@ -0,0 +1,522 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Magnachip d53e6ea8966 MIPI-DSI panel driver
+ * Copyright (C) 2023 Chris Morgan
+ */
+
+#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+
+/* Forward declaration for use in backlight function */
+struct d53e6ea8966;
+
+/* Panel info, unique to each panel */
+struct d53e6ea8966_panel_info {
+ /** @display_modes: the supported display modes */
+ const struct drm_display_mode *display_modes;
+ /** @num_modes: the number of supported display modes */
+ unsigned int num_modes;
+ /** @width_mm: panel width in mm */
+ u16 width_mm;
+ /** @height_mm: panel height in mm */
+ u16 height_mm;
+ /** @bus_flags: drm bus flags for panel */
+ u32 bus_flags;
+ /** @panel_init_seq: panel specific init sequence */
+ void (*panel_init_seq)(struct d53e6ea8966 *db);
+ /** @backlight_register: panel backlight registration or NULL */
+ int (*backlight_register)(struct d53e6ea8966 *db);
+};
+
+struct d53e6ea8966 {
+ /** @dev: the container device */
+ struct device *dev;
+ /** @dbi: the DBI bus abstraction handle */
+ struct mipi_dbi dbi;
+ /** @panel: the DRM panel instance for this device */
+ struct drm_panel panel;
+ /** @reset: reset GPIO line */
+ struct gpio_desc *reset;
+ /** @enable: enable GPIO line */
+ struct gpio_desc *enable;
+ /** @reg_vdd: VDD supply regulator for panel logic */
+ struct regulator *reg_vdd;
+ /** @reg_elvdd: ELVDD supply regulator for panel display */
+ struct regulator *reg_elvdd;
+ /** @dsi_dev: DSI child device (panel) */
+ struct mipi_dsi_device *dsi_dev;
+ /** @bl_dev: pseudo-backlight device for oled panel */
+ struct backlight_device *bl_dev;
+ /** @panel_info: struct containing panel timing and info */
+ const struct d53e6ea8966_panel_info *panel_info;
+};
+
+#define NUM_GAMMA_LEVELS 16
+#define GAMMA_TABLE_COUNT 23
+#define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1)
+
+#define MCS_ELVSS_ON 0xb1
+#define MCS_TEMP_SWIRE 0xb2
+#define MCS_PASSWORD_0 0xf0
+#define MCS_PASSWORD_1 0xf1
+#define MCS_ANALOG_PWR_CTL_0 0xf4
+#define MCS_ANALOG_PWR_CTL_1 0xf5
+#define MCS_GTCON_SET 0xf7
+#define MCS_GATELESS_SIGNAL_SET 0xf8
+#define MCS_SET_GAMMA 0xf9
+
+static inline struct d53e6ea8966 *to_d53e6ea8966(struct drm_panel *panel)
+{
+ return container_of(panel, struct d53e6ea8966, panel);
+}
+
+/* Table of gamma values provided in datasheet */
+static u8 ams495qa01_gamma[NUM_GAMMA_LEVELS][GAMMA_TABLE_COUNT] = {
+ {0x01, 0x79, 0x78, 0x8d, 0xd9, 0xdf, 0xd5, 0xcb, 0xcf, 0xc5,
+ 0xe5, 0xe0, 0xe4, 0xdc, 0xb8, 0xd4, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x7d, 0x7c, 0x92, 0xd7, 0xdd, 0xd2, 0xcb, 0xd0, 0xc6,
+ 0xe5, 0xe1, 0xe3, 0xda, 0xbd, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x7f, 0x7e, 0x95, 0xd7, 0xde, 0xd2, 0xcb, 0xcf, 0xc5,
+ 0xe5, 0xe3, 0xe3, 0xda, 0xbf, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x82, 0x81, 0x99, 0xd6, 0xdd, 0xd1, 0xca, 0xcf, 0xc3,
+ 0xe4, 0xe3, 0xe3, 0xda, 0xc2, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x84, 0x83, 0x9b, 0xd7, 0xde, 0xd2, 0xc8, 0xce, 0xc2,
+ 0xe4, 0xe3, 0xe2, 0xd9, 0xc3, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x87, 0x86, 0x9f, 0xd6, 0xdd, 0xd1, 0xc7, 0xce, 0xc1,
+ 0xe4, 0xe3, 0xe2, 0xd9, 0xc6, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x89, 0x89, 0xa2, 0xd5, 0xdb, 0xcf, 0xc8, 0xcf, 0xc2,
+ 0xe3, 0xe3, 0xe1, 0xd9, 0xc7, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x8b, 0x8b, 0xa5, 0xd5, 0xdb, 0xcf, 0xc7, 0xce, 0xc0,
+ 0xe3, 0xe3, 0xe1, 0xd8, 0xc7, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x8d, 0x8d, 0xa7, 0xd5, 0xdb, 0xcf, 0xc6, 0xce, 0xc0,
+ 0xe4, 0xe4, 0xe1, 0xd7, 0xc8, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x8f, 0x8f, 0xaa, 0xd4, 0xdb, 0xce, 0xc6, 0xcd, 0xbf,
+ 0xe3, 0xe3, 0xe1, 0xd7, 0xca, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x91, 0x91, 0xac, 0xd3, 0xda, 0xce, 0xc5, 0xcd, 0xbe,
+ 0xe3, 0xe3, 0xe0, 0xd7, 0xca, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x93, 0x93, 0xaf, 0xd3, 0xda, 0xcd, 0xc5, 0xcd, 0xbe,
+ 0xe2, 0xe3, 0xdf, 0xd6, 0xca, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x95, 0x95, 0xb1, 0xd2, 0xd9, 0xcc, 0xc4, 0xcd, 0xbe,
+ 0xe2, 0xe3, 0xdf, 0xd7, 0xcc, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x99, 0x99, 0xb6, 0xd1, 0xd9, 0xcc, 0xc3, 0xcb, 0xbc,
+ 0xe2, 0xe4, 0xdf, 0xd6, 0xcc, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x9c, 0x9c, 0xba, 0xd0, 0xd8, 0xcb, 0xc3, 0xcb, 0xbb,
+ 0xe2, 0xe4, 0xdf, 0xd6, 0xce, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+ {0x01, 0x9f, 0x9f, 0xbe, 0xcf, 0xd7, 0xc9, 0xc2, 0xcb, 0xbb,
+ 0xe1, 0xe3, 0xde, 0xd6, 0xd0, 0xd3, 0xfa, 0xed, 0xe6, 0x2f,
+ 0x00, 0x2f},
+};
+
+/*
+ * Table of elvss values provided in the datasheet, corresponding to
+ * the gamma values above.
+ */
+static u8 ams495qa01_elvss[NUM_GAMMA_LEVELS] = {
+ 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
+ 0x15, 0x15, 0x14, 0x14, 0x13, 0x12,
+};
+
+static int ams495qa01_update_gamma(struct mipi_dbi *dbi, int brightness)
+{
+ int tmp = brightness;
+
+ mipi_dbi_command_buf(dbi, MCS_SET_GAMMA, ams495qa01_gamma[tmp],
+ ARRAY_SIZE(ams495qa01_gamma[tmp]));
+ mipi_dbi_command(dbi, MCS_SET_GAMMA, 0x00);
+
+ /* Undocumented command */
+ mipi_dbi_command(dbi, 0x26, 0x00);
+
+ mipi_dbi_command(dbi, MCS_TEMP_SWIRE, ams495qa01_elvss[tmp]);
+
+ return 0;
+}
+
+static void ams495qa01_panel_init(struct d53e6ea8966 *db)
+{
+ struct mipi_dbi *dbi = &db->dbi;
+
+ mipi_dbi_command(dbi, MCS_PASSWORD_0, 0x5a, 0x5a);
+ mipi_dbi_command(dbi, MCS_PASSWORD_1, 0x5a, 0x5a);
+
+ /* Undocumented commands */
+ mipi_dbi_command(dbi, 0xb0, 0x02);
+ mipi_dbi_command(dbi, 0xf3, 0x3b);
+
+ mipi_dbi_command(dbi, MCS_ANALOG_PWR_CTL_0, 0x33, 0x42, 0x00, 0x08);
+ mipi_dbi_command(dbi, MCS_ANALOG_PWR_CTL_1, 0x00, 0x06, 0x26, 0x35, 0x03);
+
+ /* Undocumented commands */
+ mipi_dbi_command(dbi, 0xf6, 0x02);
+ mipi_dbi_command(dbi, 0xc6, 0x0b, 0x00, 0x00, 0x3c, 0x00, 0x22,
+ 0x00, 0x00, 0x00, 0x00);
+
+ mipi_dbi_command(dbi, MCS_GTCON_SET, 0x20);
+ mipi_dbi_command(dbi, MCS_TEMP_SWIRE, 0x06, 0x06, 0x06, 0x06);
+ mipi_dbi_command(dbi, MCS_ELVSS_ON, 0x07, 0x00, 0x10);
+ mipi_dbi_command(dbi, MCS_GATELESS_SIGNAL_SET, 0x7f, 0x7a,
+ 0x89, 0x67, 0x26, 0x38, 0x00, 0x00, 0x09,
+ 0x67, 0x70, 0x88, 0x7a, 0x76, 0x05, 0x09,
+ 0x23, 0x23, 0x23);
+
+ /* Undocumented commands */
+ mipi_dbi_command(dbi, 0xb5, 0xff, 0xef, 0x35, 0x42, 0x0d, 0xd7,
+ 0xff, 0x07, 0xff, 0xff, 0xfd, 0x00, 0x01,
+ 0xff, 0x05, 0x12, 0x0f, 0xff, 0xff, 0xff,
+ 0xff);
+ mipi_dbi_command(dbi, 0xb4, 0x15);
+ mipi_dbi_command(dbi, 0xb3, 0x00);
+
+ ams495qa01_update_gamma(dbi, MAX_BRIGHTNESS);
+}
+
+static int d53e6ea8966_prepare(struct drm_panel *panel)
+{
+ struct d53e6ea8966 *db = to_d53e6ea8966(panel);
+ int ret;
+
+ /* Power up */
+ ret = regulator_enable(db->reg_vdd);
+ if (ret) {
+ dev_err(db->dev, "failed to enable vdd regulator: %d\n", ret);
+ return ret;
+ }
+
+ if (db->reg_elvdd) {
+ ret = regulator_enable(db->reg_elvdd);
+ if (ret) {
+ dev_err(db->dev,
+ "failed to enable elvdd regulator: %d\n", ret);
+ regulator_disable(db->reg_vdd);
+ return ret;
+ }
+ }
+
+ /* Enable */
+ if (db->enable)
+ gpiod_set_value_cansleep(db->enable, 1);
+
+ msleep(50);
+
+ /* Reset */
+ gpiod_set_value_cansleep(db->reset, 1);
+ usleep_range(1000, 5000);
+ gpiod_set_value_cansleep(db->reset, 0);
+ msleep(20);
+
+ db->panel_info->panel_init_seq(db);
+
+ return 0;
+}
+
+static int d53e6ea8966_enable(struct drm_panel *panel)
+{
+ struct d53e6ea8966 *db = to_d53e6ea8966(panel);
+ struct mipi_dbi *dbi = &db->dbi;
+
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(200);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
+ usleep_range(10000, 15000);
+
+ return 0;
+}
+
+static int d53e6ea8966_disable(struct drm_panel *panel)
+{
+ struct d53e6ea8966 *db = to_d53e6ea8966(panel);
+ struct mipi_dbi *dbi = &db->dbi;
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
+ msleep(20);
+ mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE);
+ msleep(100);
+
+ return 0;
+}
+
+static int d53e6ea8966_unprepare(struct drm_panel *panel)
+{
+ struct d53e6ea8966 *db = to_d53e6ea8966(panel);
+
+ if (db->enable)
+ gpiod_set_value_cansleep(db->enable, 0);
+
+ gpiod_set_value_cansleep(db->reset, 1);
+
+ if (db->reg_elvdd)
+ regulator_disable(db->reg_elvdd);
+
+ regulator_disable(db->reg_vdd);
+ msleep(100);
+
+ return 0;
+}
+
+static int d53e6ea8966_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct d53e6ea8966 *db = to_d53e6ea8966(panel);
+ const struct d53e6ea8966_panel_info *panel_info = db->panel_info;
+ struct drm_display_mode *mode;
+ static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ unsigned int i;
+
+ for (i = 0; i < panel_info->num_modes; i++) {
+ mode = drm_mode_duplicate(connector->dev,
+ &panel_info->display_modes[i]);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ }
+
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = panel_info->width_mm;
+ connector->display_info.height_mm = panel_info->height_mm;
+ connector->display_info.bus_flags = panel_info->bus_flags;
+
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &bus_format, 1);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs d53e6ea8966_panel_funcs = {
+ .disable = d53e6ea8966_disable,
+ .enable = d53e6ea8966_enable,
+ .get_modes = d53e6ea8966_get_modes,
+ .prepare = d53e6ea8966_prepare,
+ .unprepare = d53e6ea8966_unprepare,
+};
+
+static int ams495qa01_set_brightness(struct backlight_device *bd)
+{
+ struct d53e6ea8966 *db = bl_get_data(bd);
+ struct mipi_dbi *dbi = &db->dbi;
+ int brightness = backlight_get_brightness(bd);
+
+ ams495qa01_update_gamma(dbi, brightness);
+
+ return 0;
+}
+
+static const struct backlight_ops ams495qa01_backlight_ops = {
+ .update_status = ams495qa01_set_brightness,
+};
+
+static int ams495qa01_backlight_register(struct d53e6ea8966 *db)
+{
+ struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = MAX_BRIGHTNESS,
+ .max_brightness = MAX_BRIGHTNESS,
+ };
+ struct device *dev = db->dev;
+ int ret = 0;
+
+ db->bl_dev = devm_backlight_device_register(dev, "panel", dev, db,
+ &ams495qa01_backlight_ops,
+ &props);
+ if (IS_ERR(db->bl_dev)) {
+ ret = PTR_ERR(db->bl_dev);
+ dev_err(dev, "error registering backlight device (%d)\n", ret);
+ }
+
+ return ret;
+}
+
+static int d53e6ea8966_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mipi_dsi_host *dsi_host;
+ struct d53e6ea8966 *db;
+ int ret;
+ struct mipi_dsi_device_info info = {
+ .type = "d53e6ea8966",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ db = devm_kzalloc(dev, sizeof(*db), GFP_KERNEL);
+ if (!db)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, db);
+
+ db->dev = dev;
+
+ db->panel_info = of_device_get_match_data(dev);
+ if (!db->panel_info)
+ return -EINVAL;
+
+ db->reg_vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(db->reg_vdd))
+ return dev_err_probe(dev, PTR_ERR(db->reg_vdd),
+ "Failed to get vdd supply\n");
+
+ db->reg_elvdd = devm_regulator_get_optional(dev, "elvdd");
+ if (IS_ERR(db->reg_elvdd))
+ db->reg_elvdd = NULL;
+
+ db->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(db->reset)) {
+ ret = PTR_ERR(db->reset);
+ return dev_err_probe(dev, ret, "no RESET GPIO\n");
+ }
+
+ db->enable = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(db->enable)) {
+ ret = PTR_ERR(db->enable);
+ return dev_err_probe(dev, ret, "cannot get ENABLE GPIO\n");
+ }
+
+ ret = mipi_dbi_spi_init(spi, &db->dbi, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "MIPI DBI init failed\n");
+
+ dsi_host = drm_of_get_dsi_bus(dev);
+ if (IS_ERR(dsi_host)) {
+ ret = PTR_ERR(dsi_host);
+ return dev_err_probe(dev, ret, "Error attaching DSI bus\n");
+ }
+
+ db->dsi_dev = devm_mipi_dsi_device_register_full(dev, dsi_host, &info);
+	if (IS_ERR(db->dsi_dev)) {
+		dev_err(dev, "failed to register dsi device: %ld\n",
+			PTR_ERR(db->dsi_dev));
+		return PTR_ERR(db->dsi_dev);
+	}
+
+ db->dsi_dev->lanes = 2;
+ db->dsi_dev->format = MIPI_DSI_FMT_RGB888;
+ db->dsi_dev->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
+
+ drm_panel_init(&db->panel, dev, &d53e6ea8966_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ if (db->panel_info->backlight_register) {
+ ret = db->panel_info->backlight_register(db);
+ if (ret < 0)
+ return ret;
+ db->panel.backlight = db->bl_dev;
+ }
+
+ drm_panel_add(&db->panel);
+
+ ret = devm_mipi_dsi_attach(dev, db->dsi_dev);
+ if (ret < 0) {
+ dev_err(dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&db->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void d53e6ea8966_remove(struct spi_device *spi)
+{
+ struct d53e6ea8966 *db = spi_get_drvdata(spi);
+
+ drm_panel_remove(&db->panel);
+}
+
+static const struct drm_display_mode ams495qa01_modes[] = {
+ { /* 60hz */
+ .clock = 33500,
+ .hdisplay = 960,
+ .hsync_start = 960 + 10,
+ .hsync_end = 960 + 10 + 2,
+ .htotal = 960 + 10 + 2 + 10,
+ .vdisplay = 544,
+ .vsync_start = 544 + 10,
+ .vsync_end = 544 + 10 + 2,
+ .vtotal = 544 + 10 + 2 + 10,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ },
+ { /* 50hz */
+ .clock = 27800,
+ .hdisplay = 960,
+ .hsync_start = 960 + 10,
+ .hsync_end = 960 + 10 + 2,
+ .htotal = 960 + 10 + 2 + 10,
+ .vdisplay = 544,
+ .vsync_start = 544 + 10,
+ .vsync_end = 544 + 10 + 2,
+ .vtotal = 544 + 10 + 2 + 10,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .type = DRM_MODE_TYPE_DRIVER,
+ },
+};
+
+static const struct d53e6ea8966_panel_info ams495qa01_info = {
+ .display_modes = ams495qa01_modes,
+ .num_modes = ARRAY_SIZE(ams495qa01_modes),
+ .width_mm = 117,
+ .height_mm = 74,
+ .bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .panel_init_seq = ams495qa01_panel_init,
+ .backlight_register = ams495qa01_backlight_register,
+};
+
+static const struct of_device_id d53e6ea8966_match[] = {
+ { .compatible = "samsung,ams495qa01", .data = &ams495qa01_info },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, d53e6ea8966_match);
+
+static const struct spi_device_id d53e6ea8966_ids[] = {
+ { "ams495qa01", 0 },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(spi, d53e6ea8966_ids);
+
+static struct spi_driver d53e6ea8966_driver = {
+ .driver = {
+ .name = "d53e6ea8966-panel",
+ .of_match_table = d53e6ea8966_match,
+ },
+ .id_table = d53e6ea8966_ids,
+ .probe = d53e6ea8966_probe,
+ .remove = d53e6ea8966_remove,
+};
+module_spi_driver(d53e6ea8966_driver);
+
+MODULE_AUTHOR("Chris Morgan <macromorgan@hotmail.com>");
+MODULE_DESCRIPTION("Magnachip d53e6ea8966 panel driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index abb0dadd8f63..f49096f53141 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -220,15 +220,8 @@ panfrost_copy_in_sync(struct drm_device *dev,
}
for (i = 0; i < in_fence_count; i++) {
- struct dma_fence *fence;
-
- ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
- &fence);
- if (ret)
- goto fail;
-
- ret = drm_sched_job_add_dependency(&job->base, fence);
-
+ ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv,
+ handles[i], 0);
if (ret)
goto fail;
}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index a92a5b0d4c25..1a82629bce3f 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -143,6 +143,17 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_resource *old_mem = bo->resource;
int ret;
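+	/* TTM no longer preallocates a dummy SYSTEM resource, so a freshly
+	 * created BO arrives here with old_mem == NULL; hop through SYSTEM
+	 * unless that is already the destination.
+	 */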
+ if (!old_mem) {
+ if (new_mem->mem_type != TTM_PL_SYSTEM) {
+ hop->mem_type = TTM_PL_SYSTEM;
+ hop->flags = TTM_PL_FLAG_TEMPORARY;
+ return -EMULTIHOP;
+ }
+
+ ttm_bo_move_null(bo, new_mem);
+ return 0;
+ }
+
qxl_bo_move_notify(bo, new_mem);
ret = ttm_bo_wait_ctx(bo, ctx);
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 62a596d3a891..e19d77d58810 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -8,6 +8,7 @@ config DRM_RADEON
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
select DRM_KMS_HELPER
+ select DRM_SUBALLOC_HELPER
select DRM_TTM
select DRM_TTM_HELPER
select SND_HDA_COMPONENT if SND_HDA_CORE
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 57e20780a458..d19a4b1c1a8f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -79,6 +79,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_audio_component.h>
+#include <drm/drm_suballoc.h>
#include "radeon_family.h"
#include "radeon_mode.h"
@@ -511,52 +512,12 @@ struct radeon_bo {
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)
-/* sub-allocation manager, it has to be protected by another lock.
- * By conception this is an helper for other part of the driver
- * like the indirect buffer or semaphore, which both have their
- * locking.
- *
- * Principe is simple, we keep a list of sub allocation in offset
- * order (first entry has offset == 0, last entry has the highest
- * offset).
- *
- * When allocating new object we first check if there is room at
- * the end total_size - (last_object_offset + last_object_size) >=
- * alloc_size. If so we allocate new object there.
- *
- * When there is not enough room at the end, we start waiting for
- * each sub object until we reach object_offset+object_size >=
- * alloc_size, this object then become the sub object we return.
- *
- * Alignment can't be bigger than page size.
- *
- * Hole are not considered for allocation to keep things simple.
- * Assumption is that there won't be hole (all object on same
- * alignment).
- */
struct radeon_sa_manager {
- wait_queue_head_t wq;
- struct radeon_bo *bo;
- struct list_head *hole;
- struct list_head flist[RADEON_NUM_RINGS];
- struct list_head olist;
- unsigned size;
- uint64_t gpu_addr;
- void *cpu_ptr;
- uint32_t domain;
- uint32_t align;
-};
-
-struct radeon_sa_bo;
-
-/* sub-allocation buffer */
-struct radeon_sa_bo {
- struct list_head olist;
- struct list_head flist;
- struct radeon_sa_manager *manager;
- unsigned soffset;
- unsigned eoffset;
- struct radeon_fence *fence;
+ struct drm_suballoc_manager base;
+ struct radeon_bo *bo;
+ uint64_t gpu_addr;
+ void *cpu_ptr;
+ u32 domain;
};
/*
@@ -587,7 +548,7 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
* Semaphores.
*/
struct radeon_semaphore {
- struct radeon_sa_bo *sa_bo;
+ struct drm_suballoc *sa_bo;
signed waiters;
uint64_t gpu_addr;
};
@@ -816,7 +777,7 @@ void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
*/
struct radeon_ib {
- struct radeon_sa_bo *sa_bo;
+ struct drm_suballoc *sa_bo;
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 62b116727b4f..6a45a72488f9 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -61,7 +61,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
{
int r;
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
+ r = radeon_sa_bo_new(&rdev->ring_tmp_bo, &ib->sa_bo, size, 256);
if (r) {
dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
return r;
@@ -77,7 +77,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
* space and soffset is the offset inside the pool bo
*/
- ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
+ ib->gpu_addr = drm_suballoc_soffset(ib->sa_bo) + RADEON_VA_IB_OFFSET;
} else {
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
}
@@ -97,7 +97,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
radeon_sync_free(rdev, &ib->sync, ib->fence);
- radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
+ radeon_sa_bo_free(&ib->sa_bo, ib->fence);
radeon_fence_unref(&ib->fence);
}
@@ -201,8 +201,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
if (rdev->family >= CHIP_BONAIRE) {
r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
- RADEON_IB_POOL_SIZE*64*1024,
- RADEON_GPU_PAGE_SIZE,
+ RADEON_IB_POOL_SIZE*64*1024, 256,
RADEON_GEM_DOMAIN_GTT,
RADEON_GEM_GTT_WC);
} else {
@@ -210,8 +209,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
* to the command stream checking
*/
r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
- RADEON_IB_POOL_SIZE*64*1024,
- RADEON_GPU_PAGE_SIZE,
+ RADEON_IB_POOL_SIZE*64*1024, 256,
RADEON_GEM_DOMAIN_GTT, 0);
}
if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 0a6ef49e990a..39cc87a59a9a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -169,15 +169,22 @@ extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
/*
* sub allocation
*/
+static inline struct radeon_sa_manager *
+to_radeon_sa_manager(struct drm_suballoc_manager *manager)
+{
+ return container_of(manager, struct radeon_sa_manager, base);
+}
-static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
+static inline uint64_t radeon_sa_bo_gpu_addr(struct drm_suballoc *sa_bo)
{
- return sa_bo->manager->gpu_addr + sa_bo->soffset;
+ return to_radeon_sa_manager(sa_bo->manager)->gpu_addr +
+ drm_suballoc_soffset(sa_bo);
}
-static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
+static inline void *radeon_sa_bo_cpu_addr(struct drm_suballoc *sa_bo)
{
- return sa_bo->manager->cpu_ptr + sa_bo->soffset;
+ return to_radeon_sa_manager(sa_bo->manager)->cpu_ptr +
+ drm_suballoc_soffset(sa_bo);
}
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
@@ -190,12 +197,10 @@ extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
-extern int radeon_sa_bo_new(struct radeon_device *rdev,
- struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo **sa_bo,
- unsigned size, unsigned align);
-extern void radeon_sa_bo_free(struct radeon_device *rdev,
- struct radeon_sa_bo **sa_bo,
+extern int radeon_sa_bo_new(struct radeon_sa_manager *sa_manager,
+ struct drm_suballoc **sa_bo,
+ unsigned int size, unsigned int align);
+extern void radeon_sa_bo_free(struct drm_suballoc **sa_bo,
struct radeon_fence *fence);
#if defined(CONFIG_DEBUG_FS)
extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index 0981948bd9ed..c87a57c9c592 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -44,53 +44,32 @@
#include "radeon.h"
-static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
-static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
-
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
- unsigned size, u32 align, u32 domain, u32 flags)
+ unsigned int size, u32 sa_align, u32 domain,
+ u32 flags)
{
- int i, r;
-
- init_waitqueue_head(&sa_manager->wq);
- sa_manager->bo = NULL;
- sa_manager->size = size;
- sa_manager->domain = domain;
- sa_manager->align = align;
- sa_manager->hole = &sa_manager->olist;
- INIT_LIST_HEAD(&sa_manager->olist);
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- INIT_LIST_HEAD(&sa_manager->flist[i]);
- }
+ int r;
- r = radeon_bo_create(rdev, size, align, true,
+ r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
domain, flags, NULL, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
}
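+	/* The backing BO stays GPU-page aligned; sa_align only constrains suballocations. */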
+ sa_manager->domain = domain;
+
+ drm_suballoc_manager_init(&sa_manager->base, size, sa_align);
+
return r;
}
void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager)
{
- struct radeon_sa_bo *sa_bo, *tmp;
-
- if (!list_empty(&sa_manager->olist)) {
- sa_manager->hole = &sa_manager->olist,
- radeon_sa_bo_try_free(sa_manager);
- if (!list_empty(&sa_manager->olist)) {
- dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
- }
- }
- list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
- radeon_sa_bo_remove_locked(sa_bo);
- }
+ drm_suballoc_manager_fini(&sa_manager->base);
radeon_bo_unref(&sa_manager->bo);
- sa_manager->size = 0;
}
int radeon_sa_bo_manager_start(struct radeon_device *rdev,
@@ -139,260 +118,34 @@ int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
return r;
}
-static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
+int radeon_sa_bo_new(struct radeon_sa_manager *sa_manager,
+ struct drm_suballoc **sa_bo,
+ unsigned int size, unsigned int align)
{
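+	/* Defer to the generic DRM suballocator; "true" requests an interruptible wait for space. */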
- struct radeon_sa_manager *sa_manager = sa_bo->manager;
- if (sa_manager->hole == &sa_bo->olist) {
- sa_manager->hole = sa_bo->olist.prev;
- }
- list_del_init(&sa_bo->olist);
- list_del_init(&sa_bo->flist);
- radeon_fence_unref(&sa_bo->fence);
- kfree(sa_bo);
-}
-
-static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
-{
- struct radeon_sa_bo *sa_bo, *tmp;
-
- if (sa_manager->hole->next == &sa_manager->olist)
- return;
+ struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
+ GFP_KERNEL, true, align);
- sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
- list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
- if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
- return;
- }
- radeon_sa_bo_remove_locked(sa_bo);
+ if (IS_ERR(sa)) {
+ *sa_bo = NULL;
+ return PTR_ERR(sa);
}
-}
-static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
-{
- struct list_head *hole = sa_manager->hole;
-
- if (hole != &sa_manager->olist) {
- return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
- }
+ *sa_bo = sa;
return 0;
}
-static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
-{
- struct list_head *hole = sa_manager->hole;
-
- if (hole->next != &sa_manager->olist) {
- return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
- }
- return sa_manager->size;
-}
-
-static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo *sa_bo,
- unsigned size, unsigned align)
-{
- unsigned soffset, eoffset, wasted;
-
- soffset = radeon_sa_bo_hole_soffset(sa_manager);
- eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
- wasted = (align - (soffset % align)) % align;
-
- if ((eoffset - soffset) >= (size + wasted)) {
- soffset += wasted;
-
- sa_bo->manager = sa_manager;
- sa_bo->soffset = soffset;
- sa_bo->eoffset = soffset + size;
- list_add(&sa_bo->olist, sa_manager->hole);
- INIT_LIST_HEAD(&sa_bo->flist);
- sa_manager->hole = &sa_bo->olist;
- return true;
- }
- return false;
-}
-
-/**
- * radeon_sa_event - Check if we can stop waiting
- *
- * @sa_manager: pointer to the sa_manager
- * @size: number of bytes we want to allocate
- * @align: alignment we need to match
- *
- * Check if either there is a fence we can wait for or
- * enough free memory to satisfy the allocation directly
- */
-static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
- unsigned size, unsigned align)
-{
- unsigned soffset, eoffset, wasted;
- int i;
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- if (!list_empty(&sa_manager->flist[i])) {
- return true;
- }
- }
-
- soffset = radeon_sa_bo_hole_soffset(sa_manager);
- eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
- wasted = (align - (soffset % align)) % align;
-
- if ((eoffset - soffset) >= (size + wasted)) {
- return true;
- }
-
- return false;
-}
-
-static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
- struct radeon_fence **fences,
- unsigned *tries)
-{
- struct radeon_sa_bo *best_bo = NULL;
- unsigned i, soffset, best, tmp;
-
- /* if hole points to the end of the buffer */
- if (sa_manager->hole->next == &sa_manager->olist) {
- /* try again with its beginning */
- sa_manager->hole = &sa_manager->olist;
- return true;
- }
-
- soffset = radeon_sa_bo_hole_soffset(sa_manager);
- /* to handle wrap around we add sa_manager->size */
- best = sa_manager->size * 2;
- /* go over all fence list and try to find the closest sa_bo
- * of the current last
- */
- for (i = 0; i < RADEON_NUM_RINGS; ++i) {
- struct radeon_sa_bo *sa_bo;
-
- fences[i] = NULL;
-
- if (list_empty(&sa_manager->flist[i])) {
- continue;
- }
-
- sa_bo = list_first_entry(&sa_manager->flist[i],
- struct radeon_sa_bo, flist);
-
- if (!radeon_fence_signaled(sa_bo->fence)) {
- fences[i] = sa_bo->fence;
- continue;
- }
-
- /* limit the number of tries each ring gets */
- if (tries[i] > 2) {
- continue;
- }
-
- tmp = sa_bo->soffset;
- if (tmp < soffset) {
- /* wrap around, pretend it's after */
- tmp += sa_manager->size;
- }
- tmp -= soffset;
- if (tmp < best) {
- /* this sa bo is the closest one */
- best = tmp;
- best_bo = sa_bo;
- }
- }
-
- if (best_bo) {
- ++tries[best_bo->fence->ring];
- sa_manager->hole = best_bo->olist.prev;
-
- /* we knew that this one is signaled,
- so it's save to remote it */
- radeon_sa_bo_remove_locked(best_bo);
- return true;
- }
- return false;
-}
-
-int radeon_sa_bo_new(struct radeon_device *rdev,
- struct radeon_sa_manager *sa_manager,
- struct radeon_sa_bo **sa_bo,
- unsigned size, unsigned align)
-{
- struct radeon_fence *fences[RADEON_NUM_RINGS];
- unsigned tries[RADEON_NUM_RINGS];
- int i, r;
-
- BUG_ON(align > sa_manager->align);
- BUG_ON(size > sa_manager->size);
-
- *sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
- if ((*sa_bo) == NULL) {
- return -ENOMEM;
- }
- (*sa_bo)->manager = sa_manager;
- (*sa_bo)->fence = NULL;
- INIT_LIST_HEAD(&(*sa_bo)->olist);
- INIT_LIST_HEAD(&(*sa_bo)->flist);
-
- spin_lock(&sa_manager->wq.lock);
- do {
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- tries[i] = 0;
-
- do {
- radeon_sa_bo_try_free(sa_manager);
-
- if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
- size, align)) {
- spin_unlock(&sa_manager->wq.lock);
- return 0;
- }
-
- /* see if we can skip over some allocations */
- } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
-
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- radeon_fence_ref(fences[i]);
-
- spin_unlock(&sa_manager->wq.lock);
- r = radeon_fence_wait_any(rdev, fences, false);
- for (i = 0; i < RADEON_NUM_RINGS; ++i)
- radeon_fence_unref(&fences[i]);
- spin_lock(&sa_manager->wq.lock);
- /* if we have nothing to wait for block */
- if (r == -ENOENT) {
- r = wait_event_interruptible_locked(
- sa_manager->wq,
- radeon_sa_event(sa_manager, size, align)
- );
- }
-
- } while (!r);
-
- spin_unlock(&sa_manager->wq.lock);
- kfree(*sa_bo);
- *sa_bo = NULL;
- return r;
-}
-
-void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
+void radeon_sa_bo_free(struct drm_suballoc **sa_bo,
struct radeon_fence *fence)
{
- struct radeon_sa_manager *sa_manager;
-
if (sa_bo == NULL || *sa_bo == NULL) {
return;
}
- sa_manager = (*sa_bo)->manager;
- spin_lock(&sa_manager->wq.lock);
- if (fence && !radeon_fence_signaled(fence)) {
- (*sa_bo)->fence = radeon_fence_ref(fence);
- list_add_tail(&(*sa_bo)->flist,
- &sa_manager->flist[fence->ring]);
- } else {
- radeon_sa_bo_remove_locked(*sa_bo);
- }
- wake_up_all_locked(&sa_manager->wq);
- spin_unlock(&sa_manager->wq.lock);
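+	/* The range returns to the pool once @fence signals; a NULL fence frees it immediately. */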
+ if (fence)
+ drm_suballoc_free(*sa_bo, &fence->base);
+ else
+ drm_suballoc_free(*sa_bo, NULL);
+
*sa_bo = NULL;
}
@@ -400,25 +153,8 @@ void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
struct seq_file *m)
{
- struct radeon_sa_bo *i;
+ struct drm_printer p = drm_seq_file_printer(m);
- spin_lock(&sa_manager->wq.lock);
- list_for_each_entry(i, &sa_manager->olist, olist) {
- uint64_t soffset = i->soffset + sa_manager->gpu_addr;
- uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
- if (&i->olist == sa_manager->hole) {
- seq_printf(m, ">");
- } else {
- seq_printf(m, " ");
- }
- seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
- soffset, eoffset, eoffset - soffset);
- if (i->fence) {
- seq_printf(m, " protected by 0x%016llx on ring %d",
- i->fence->seq, i->fence->ring);
- }
- seq_printf(m, "\n");
- }
- spin_unlock(&sa_manager->wq.lock);
+ drm_suballoc_dump_debug_info(&sa_manager->base, &p, sa_manager->gpu_addr);
}
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 221e59476f64..1f0a9a4ff5ae 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -40,7 +40,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
if (*semaphore == NULL) {
return -ENOMEM;
}
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
+ r = radeon_sa_bo_new(&rdev->ring_tmp_bo,
&(*semaphore)->sa_bo, 8, 8);
if (r) {
kfree(*semaphore);
@@ -100,7 +100,7 @@ void radeon_semaphore_free(struct radeon_device *rdev,
dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
" hardware lockup imminent!\n", *semaphore);
}
- radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
+ radeon_sa_bo_free(&(*semaphore)->sa_bo, fence);
kfree(*semaphore);
*semaphore = NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1e8e287e113c..2220cdf6a3f6 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -211,13 +211,10 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
if (r)
return r;
- /* Can't move a pinned BO */
rbo = container_of(bo, struct radeon_bo, tbo);
- if (WARN_ON_ONCE(rbo->tbo.pin_count > 0))
- return -EINVAL;
-
rdev = radeon_get_rdev(bo->bdev);
- if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+ if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
+ bo->ttm == NULL)) {
ttm_bo_move_null(bo, new_mem);
goto out;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 8ea09d915c3c..b8f8b45ebf59 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -261,9 +261,6 @@ static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
else
ret = rockchip_drm_gem_object_mmap_dma(obj, vma);
- if (ret)
- drm_gem_vm_close(vma);
-
return ret;
}
@@ -518,8 +515,14 @@ int rockchip_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
if (rk_obj->pages) {
- void *vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
- pgprot_writecombine(PAGE_KERNEL));
+ void *vaddr;
+
+ if (rk_obj->kvaddr)
+ vaddr = rk_obj->kvaddr;
+ else
+ vaddr = vmap(rk_obj->pages, rk_obj->num_pages, VM_MAP,
+ pgprot_writecombine(PAGE_KERNEL));
+
if (!vaddr)
return -ENOMEM;
iosys_map_set_vaddr(map, vaddr);
@@ -539,7 +542,8 @@ void rockchip_gem_prime_vunmap(struct drm_gem_object *obj,
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
if (rk_obj->pages) {
- vunmap(map->vaddr);
+ if (map->vaddr != rk_obj->kvaddr)
+ vunmap(map->vaddr);
return;
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index fa1f4ee6d195..abbc189affa7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -316,13 +316,10 @@ static int vop_convert_afbc_format(uint32_t format)
case DRM_FORMAT_RGB565:
case DRM_FORMAT_BGR565:
return AFBC_FMT_RGB565;
- /* either of the below should not be reachable */
default:
- DRM_WARN_ONCE("unsupported AFBC format[%08x]\n", format);
+ DRM_DEBUG_KMS("unsupported AFBC format[%08x]\n", format);
return -EINVAL;
}
-
- return -EINVAL;
}
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
@@ -2221,7 +2218,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
goto err_disable_pm_runtime;
if (vop->data->feature & VOP_FEATURE_INTERNAL_RGB) {
- vop->rgb = rockchip_rgb_init(dev, &vop->crtc, vop->drm_dev);
+ vop->rgb = rockchip_rgb_init(dev, &vop->crtc, vop->drm_dev, 0);
if (IS_ERR(vop->rgb)) {
ret = PTR_ERR(vop->rgb);
goto err_disable_pm_runtime;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
index ba3b81789509..0e0012368976 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
@@ -38,6 +38,7 @@
#include "rockchip_drm_gem.h"
#include "rockchip_drm_fb.h"
#include "rockchip_drm_vop2.h"
+#include "rockchip_rgb.h"
/*
* VOP2 architecture
@@ -211,6 +212,9 @@ struct vop2 {
struct clk *hclk;
struct clk *aclk;
+ /* optional internal rgb encoder */
+ struct rockchip_rgb *rgb;
+
/* must be put at the end of the struct */
struct vop2_win win[];
};
@@ -2245,7 +2249,7 @@ static struct vop2_video_port *find_vp_without_primary(struct vop2 *vop2)
#define NR_LAYERS 6
-static int vop2_create_crtc(struct vop2 *vop2)
+static int vop2_create_crtcs(struct vop2 *vop2)
{
const struct vop2_data *vop2_data = vop2->data;
struct drm_device *drm = vop2->drm;
@@ -2321,10 +2325,11 @@ static int vop2_create_crtc(struct vop2 *vop2)
/* change the unused primary window to overlay window */
win->type = DRM_PLANE_TYPE_OVERLAY;
}
- }
-
- if (win->type == DRM_PLANE_TYPE_OVERLAY)
+ } else if (win->type == DRM_PLANE_TYPE_OVERLAY) {
possible_crtcs = (1 << nvps) - 1;
+ } else {
+ possible_crtcs = 0;
+ }
ret = vop2_plane_init(vop2, win, possible_crtcs);
if (ret) {
@@ -2370,15 +2375,44 @@ static int vop2_create_crtc(struct vop2 *vop2)
return 0;
}
-static void vop2_destroy_crtc(struct drm_crtc *crtc)
+static void vop2_destroy_crtcs(struct vop2 *vop2)
{
- of_node_put(crtc->port);
+ struct drm_device *drm = vop2->drm;
+ struct list_head *crtc_list = &drm->mode_config.crtc_list;
+ struct list_head *plane_list = &drm->mode_config.plane_list;
+ struct drm_crtc *crtc, *tmpc;
+ struct drm_plane *plane, *tmpp;
+
+ list_for_each_entry_safe(plane, tmpp, plane_list, head)
+ drm_plane_cleanup(plane);
/*
* Destroy CRTC after vop2_plane_destroy() since vop2_disable_plane()
* references the CRTC.
*/
- drm_crtc_cleanup(crtc);
+ list_for_each_entry_safe(crtc, tmpc, crtc_list, head) {
+ of_node_put(crtc->port);
+ drm_crtc_cleanup(crtc);
+ }
+}
+
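+/* Find the first video port whose OF graph has an RGB endpoint, or return -ENOENT. */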
+static int vop2_find_rgb_encoder(struct vop2 *vop2)
+{
+ struct device_node *node = vop2->dev->of_node;
+ struct device_node *endpoint;
+ int i;
+
+ for (i = 0; i < vop2->data->nr_vps; i++) {
+ endpoint = of_graph_get_endpoint_by_regs(node, i,
+ ROCKCHIP_VOP2_EP_RGB0);
+ if (!endpoint)
+ continue;
+
+ of_node_put(endpoint);
+ return i;
+ }
+
+ return -ENOENT;
}
static struct reg_field vop2_cluster_regs[VOP2_WIN_MAX_REG] = {
@@ -2682,33 +2716,45 @@ static int vop2_bind(struct device *dev, struct device *master, void *data)
if (ret)
return ret;
- ret = vop2_create_crtc(vop2);
+ ret = vop2_create_crtcs(vop2);
if (ret)
return ret;
+ ret = vop2_find_rgb_encoder(vop2);
+ if (ret >= 0) {
+ vop2->rgb = rockchip_rgb_init(dev, &vop2->vps[ret].crtc,
+ vop2->drm, ret);
+ if (IS_ERR(vop2->rgb)) {
+ if (PTR_ERR(vop2->rgb) == -EPROBE_DEFER) {
+ ret = PTR_ERR(vop2->rgb);
+ goto err_crtcs;
+ }
+ vop2->rgb = NULL;
+ }
+ }
+
rockchip_drm_dma_init_device(vop2->drm, vop2->dev);
pm_runtime_enable(&pdev->dev);
return 0;
+
+err_crtcs:
+ vop2_destroy_crtcs(vop2);
+
+ return ret;
}
static void vop2_unbind(struct device *dev, struct device *master, void *data)
{
struct vop2 *vop2 = dev_get_drvdata(dev);
- struct drm_device *drm = vop2->drm;
- struct list_head *plane_list = &drm->mode_config.plane_list;
- struct list_head *crtc_list = &drm->mode_config.crtc_list;
- struct drm_crtc *crtc, *tmpc;
- struct drm_plane *plane, *tmpp;
pm_runtime_disable(dev);
- list_for_each_entry_safe(plane, tmpp, plane_list, head)
- drm_plane_cleanup(plane);
+ if (vop2->rgb)
+ rockchip_rgb_fini(vop2->rgb);
- list_for_each_entry_safe(crtc, tmpc, crtc_list, head)
- vop2_destroy_crtc(crtc);
+ vop2_destroy_crtcs(vop2);
}
const struct component_ops vop2_component_ops = {
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 75eb7cca3d82..c677b71ae516 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -22,13 +22,11 @@
#include "rockchip_drm_vop.h"
#include "rockchip_rgb.h"
-#define encoder_to_rgb(c) container_of(c, struct rockchip_rgb, encoder)
-
struct rockchip_rgb {
struct device *dev;
struct drm_device *drm_dev;
struct drm_bridge *bridge;
- struct drm_encoder encoder;
+ struct rockchip_encoder encoder;
struct drm_connector connector;
int output_mode;
};
@@ -74,7 +72,8 @@ struct drm_encoder_helper_funcs rockchip_rgb_encoder_helper_funcs = {
struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
struct drm_crtc *crtc,
- struct drm_device *drm_dev)
+ struct drm_device *drm_dev,
+ int video_port)
{
struct rockchip_rgb *rgb;
struct drm_encoder *encoder;
@@ -92,7 +91,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
rgb->dev = dev;
rgb->drm_dev = drm_dev;
- port = of_graph_get_port_by_id(dev->of_node, 0);
+ port = of_graph_get_port_by_id(dev->of_node, video_port);
if (!port)
return ERR_PTR(-EINVAL);
@@ -105,8 +104,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
continue;
child_count++;
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
- &panel, &bridge);
+ ret = drm_of_find_panel_or_bridge(dev->of_node, video_port,
+ endpoint_id, &panel, &bridge);
if (!ret) {
of_node_put(endpoint);
break;
@@ -125,7 +124,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
return ERR_PTR(ret);
}
- encoder = &rgb->encoder;
+ encoder = &rgb->encoder.encoder;
encoder->possible_crtcs = drm_crtc_mask(crtc);
ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_NONE);
@@ -161,6 +160,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
goto err_free_encoder;
}
+ rgb->encoder.crtc_endpoint_id = endpoint_id;
+
ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) {
DRM_DEV_ERROR(drm_dev->dev,
@@ -182,6 +183,6 @@ void rockchip_rgb_fini(struct rockchip_rgb *rgb)
{
drm_panel_bridge_remove(rgb->bridge);
drm_connector_cleanup(&rgb->connector);
- drm_encoder_cleanup(&rgb->encoder);
+ drm_encoder_cleanup(&rgb->encoder.encoder);
}
EXPORT_SYMBOL_GPL(rockchip_rgb_fini);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
index 27b9635124bc..1bd4e20e91eb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.h
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
@@ -8,12 +8,14 @@
#ifdef CONFIG_ROCKCHIP_RGB
struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
struct drm_crtc *crtc,
- struct drm_device *drm_dev);
+ struct drm_device *drm_dev,
+ int video_port);
void rockchip_rgb_fini(struct rockchip_rgb *rgb);
#else
static inline struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
struct drm_crtc *crtc,
- struct drm_device *drm_dev)
+ struct drm_device *drm_dev,
+ int video_port)
{
return NULL;
}
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 4e6ad6e122bc..214364fccb71 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -53,6 +53,7 @@
#include <drm/drm_print.h>
#include <drm/drm_gem.h>
+#include <drm/drm_syncobj.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>
@@ -719,6 +720,34 @@ int drm_sched_job_add_dependency(struct drm_sched_job *job,
EXPORT_SYMBOL(drm_sched_job_add_dependency);
/**
+ * drm_sched_job_add_syncobj_dependency - adds a syncobj's fence as a job dependency
+ * @job: scheduler job to add the dependencies to
+ * @file: drm file private pointer
+ * @handle: syncobj handle to lookup
+ * @point: timeline point
+ *
+ * This adds the fence matching the given syncobj to @job.
+ *
+ * Returns:
+ * 0 on success, or an error if the syncobj fence cannot be found or added as a dependency.
+ */
+int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
+ struct drm_file *file,
+ u32 handle,
+ u32 point)
+{
+ struct dma_fence *fence;
+ int ret;
+
+ ret = drm_syncobj_find_fence(file, handle, point, 0, &fence);
+ if (ret)
+ return ret;
+
+ return drm_sched_job_add_dependency(job, fence);
+}
+EXPORT_SYMBOL(drm_sched_job_add_syncobj_dependency);
+
+/**
* drm_sched_job_add_resv_dependencies - add all fences from the resv to the job
* @job: scheduler job to add the dependencies to
* @resv: the dma_resv object to get the fences from
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
index 34e80eb6d96e..9536829c6e3a 100644
--- a/drivers/gpu/drm/tests/drm_format_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -597,7 +597,7 @@ static void drm_test_fb_xrgb8888_to_xrgb1555(struct kunit *test)
drm_fb_xrgb8888_to_xrgb1555(&dst, &result->dst_pitch, &src, &fb, &params->clip);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
- KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
@@ -628,7 +628,7 @@ static void drm_test_fb_xrgb8888_to_argb1555(struct kunit *test)
drm_fb_xrgb8888_to_argb1555(&dst, &result->dst_pitch, &src, &fb, &params->clip);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
- KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
@@ -659,7 +659,7 @@ static void drm_test_fb_xrgb8888_to_rgba5551(struct kunit *test)
drm_fb_xrgb8888_to_rgba5551(&dst, &result->dst_pitch, &src, &fb, &params->clip);
buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16));
- KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
@@ -724,7 +724,7 @@ static void drm_test_fb_xrgb8888_to_argb8888(struct kunit *test)
drm_fb_xrgb8888_to_argb8888(&dst, &result->dst_pitch, &src, &fb, &params->clip);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
- KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
@@ -786,7 +786,7 @@ static void drm_test_fb_xrgb8888_to_argb2101010(struct kunit *test)
drm_fb_xrgb8888_to_argb2101010(&dst, &result->dst_pitch, &src, &fb, &params->clip);
buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32));
- KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+ KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size);
}
static struct kunit_case drm_format_helper_test_cases[] = {
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 165365b515e1..dca077411f77 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -1985,9 +1985,9 @@ dma_addr_t dispc_plane_state_p_uv_addr(const struct drm_plane_state *state)
(y * fb->pitches[1] / fb->format->vsub);
}
-int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
- const struct drm_plane_state *state,
- u32 hw_videoport)
+void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport)
{
bool lite = dispc->feat->vid_lite[hw_plane];
u32 fourcc = state->fb->format->format;
@@ -2066,15 +2066,11 @@ int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
else
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
28, 28);
-
- return 0;
}
-int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable)
+void dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable)
{
VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 0, 0);
-
- return 0;
}
static u32 dispc_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane)
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index e49432f0abf5..946ed769caaf 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -123,10 +123,10 @@ int dispc_runtime_resume(struct dispc_device *dispc);
int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
const struct drm_plane_state *state,
u32 hw_videoport);
-int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
- const struct drm_plane_state *state,
- u32 hw_videoport);
-int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable);
+void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport);
+void dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable);
const u32 *dispc_plane_formats(struct dispc_device *dispc, unsigned int *len);
int dispc_init(struct tidss_device *tidss);
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index fe2c41f0cd4f..6bdd6e4a955a 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -113,7 +113,6 @@ static void tidss_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
u32 hw_videoport;
- int ret;
dev_dbg(ddev->dev, "%s\n", __func__);
@@ -124,15 +123,17 @@ static void tidss_plane_atomic_update(struct drm_plane *plane,
hw_videoport = to_tidss_crtc(new_state->crtc)->hw_videoport;
- ret = dispc_plane_setup(tidss->dispc, tplane->hw_plane_id,
- new_state, hw_videoport);
+ dispc_plane_setup(tidss->dispc, tplane->hw_plane_id, new_state, hw_videoport);
+}
- if (ret) {
- dev_err(plane->dev->dev, "%s: Failed to setup plane %d\n",
- __func__, tplane->hw_plane_id);
- dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
- return;
- }
+static void tidss_plane_atomic_enable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *ddev = plane->dev;
+ struct tidss_device *tidss = to_tidss(ddev);
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, true);
}
@@ -160,6 +161,7 @@ static void drm_plane_destroy(struct drm_plane *plane)
static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = {
.atomic_check = tidss_plane_atomic_check,
.atomic_update = tidss_plane_atomic_update,
+ .atomic_enable = tidss_plane_atomic_enable,
.atomic_disable = tidss_plane_atomic_disable,
};
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index 63881a3754f8..c38d85848af8 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -606,16 +606,12 @@ static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = {
*/
static struct drm_display_mode simpledrm_mode(unsigned int width,
- unsigned int height)
+ unsigned int height,
+ unsigned int width_mm,
+ unsigned int height_mm)
{
- /*
- * Assume a monitor resolution of 96 dpi to
- * get a somewhat reasonable screen size.
- */
const struct drm_display_mode mode = {
- DRM_MODE_INIT(60, width, height,
- DRM_MODE_RES_MM(width, 96ul),
- DRM_MODE_RES_MM(height, 96ul))
+ DRM_MODE_INIT(60, width, height, width_mm, height_mm)
};
return mode;
@@ -629,6 +625,8 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
struct simpledrm_device *sdev;
struct drm_device *dev;
int width, height, stride;
+ int width_mm = 0, height_mm = 0;
+ struct device_node *panel_node;
const struct drm_format_info *format;
struct resource *res, *mem = NULL;
struct drm_plane *primary_plane;
@@ -685,6 +683,12 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
mem = simplefb_get_memory_of(dev, of_node);
if (IS_ERR(mem))
return ERR_CAST(mem);
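+		/* An optional "panel" phandle may supply the physical display size. */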
+ panel_node = of_parse_phandle(of_node, "panel", 0);
+ if (panel_node) {
+ simplefb_read_u32_of(dev, panel_node, "width-mm", &width_mm);
+ simplefb_read_u32_of(dev, panel_node, "height-mm", &height_mm);
+ of_node_put(panel_node);
+ }
} else {
drm_err(dev, "no simplefb configuration found\n");
return ERR_PTR(-ENODEV);
@@ -695,7 +699,16 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
return ERR_PTR(-EINVAL);
}
- sdev->mode = simpledrm_mode(width, height);
+ /*
+ * Assume a monitor resolution of 96 dpi if physical dimensions
+ * are not specified to get a somewhat reasonable screen size.
+ */
+ if (!width_mm)
+ width_mm = DRM_MODE_RES_MM(width, 96ul);
+ if (!height_mm)
+ height_mm = DRM_MODE_RES_MM(height, 96ul);
+
+ sdev->mode = simpledrm_mode(width, height, width_mm, height_mm);
sdev->format = format;
sdev->pitch = stride;
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 326a3d13a829..882c2fa346f3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -120,8 +120,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
bool old_use_tt, new_use_tt;
int ret;
- old_use_tt = bo->resource &&
- ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
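+	/* A BO without a resource behaves as if it lived in SYSTEM, which is TT-backed. */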
+ old_use_tt = !bo->resource || ttm_manager_type(bdev, bo->resource->mem_type)->use_tt;
new_use_tt = ttm_manager_type(bdev, mem->mem_type)->use_tt;
ttm_bo_unmap_virtual(bo);
@@ -894,14 +893,18 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
if (!placement->num_placement && !placement->num_busy_placement)
return ttm_bo_pipeline_gutting(bo);
- /*
- * Check whether we need to move buffer.
- */
- if (!bo->resource || !ttm_resource_compat(bo->resource, placement)) {
- ret = ttm_bo_move_buffer(bo, placement, ctx);
- if (ret)
- return ret;
- }
+ /* Check whether we need to move buffer. */
+ if (bo->resource && ttm_resource_compat(bo->resource, placement))
+ return 0;
+
+ /* Moving of pinned BOs is forbidden */
+ if (bo->pin_count)
+ return -EINVAL;
+
+ ret = ttm_bo_move_buffer(bo, placement, ctx);
+ if (ret)
+ return ret;
+
/*
* We might need to add a TTM.
*/
@@ -953,7 +956,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
- static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
int ret;
kref_init(&bo->kref);
@@ -970,12 +972,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
bo->base.resv = &bo->base._resv;
atomic_inc(&ttm_glob.bo_count);
- ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
- if (unlikely(ret)) {
- ttm_bo_put(bo);
- return ret;
- }
-
/*
* For ttm_bo_type_device buffers, allocate
* address space from the device.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 7635d7d6b13b..fd9fd3d15101 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -157,8 +157,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
bool clear;
int ret = 0;
- if (!src_mem)
- return 0;
+ if (WARN_ON(!src_mem))
+ return -EINVAL;
src_man = ttm_manager_type(bdev, src_mem->mem_type);
if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
@@ -704,30 +704,23 @@ EXPORT_SYMBOL(ttm_bo_move_sync_cleanup);
*/
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
- static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
struct ttm_buffer_object *ghost;
- struct ttm_resource *sys_res;
struct ttm_tt *ttm;
int ret;
- ret = ttm_resource_alloc(bo, &sys_mem, &sys_res);
- if (ret)
- return ret;
-
/* If already idle, no need for ghost object dance. */
if (dma_resv_test_signaled(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP)) {
if (!bo->ttm) {
/* See comment below about clearing. */
ret = ttm_tt_create(bo, true);
if (ret)
- goto error_free_sys_mem;
+ return ret;
} else {
ttm_tt_unpopulate(bo->bdev, bo->ttm);
if (bo->type == ttm_bo_type_device)
ttm_tt_mark_for_clear(bo->ttm);
}
ttm_resource_free(bo, &bo->resource);
- ttm_bo_assign_mem(bo, sys_res);
return 0;
}
@@ -744,7 +737,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
ret = ttm_tt_create(bo, true);
swap(bo->ttm, ttm);
if (ret)
- goto error_free_sys_mem;
+ return ret;
ret = ttm_buffer_object_transfer(bo, &ghost);
if (ret)
@@ -760,13 +753,9 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
dma_resv_unlock(&ghost->base._resv);
ttm_bo_put(ghost);
bo->ttm = ttm;
- ttm_bo_assign_mem(bo, sys_res);
return 0;
error_destroy_tt:
ttm_tt_destroy(bo->bdev, ttm);
-
-error_free_sys_mem:
- ttm_resource_free(bo, &sys_res);
return ret;
}
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index b8a826a24fb2..7333f7a87a2f 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -361,7 +361,6 @@ bool ttm_resource_compat(struct ttm_resource *res,
return false;
}
-EXPORT_SYMBOL(ttm_resource_compat);
void ttm_resource_set_bo(struct ttm_resource *res,
struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 5da1806f3969..2e94ce788c71 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -397,20 +397,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
}
static int
-v3d_job_add_deps(struct drm_file *file_priv, struct v3d_job *job,
- u32 in_sync, u32 point)
-{
- struct dma_fence *in_fence = NULL;
- int ret;
-
- ret = drm_syncobj_find_fence(file_priv, in_sync, point, 0, &in_fence);
- if (ret == -EINVAL)
- return ret;
-
- return drm_sched_job_add_dependency(&job->base, in_fence);
-}
-
-static int
v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
void **container, size_t size, void (*free)(struct kref *ref),
u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
@@ -447,14 +433,18 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
DRM_DEBUG("Failed to copy wait dep handle.\n");
goto fail_deps;
}
- ret = v3d_job_add_deps(file_priv, job, in.handle, 0);
- if (ret)
+ ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in.handle, 0);
+
+				/* TODO: Investigate why this was filtered out for the IOCTL. */
+ if (ret && ret != -ENOENT)
goto fail_deps;
}
}
} else {
- ret = v3d_job_add_deps(file_priv, job, in_sync, 0);
- if (ret)
+ ret = drm_sched_job_add_syncobj_dependency(&job->base, file_priv, in_sync, 0);
+
+		/* TODO: Investigate why this was filtered out for the IOCTL. */
+ if (ret && ret != -ENOENT)
goto fail_deps;
}
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 95069bb16821..8768566c610b 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -690,7 +690,7 @@ struct vc4_exec_info {
/* This is the array of BOs that were looked up at the start of exec.
* Command validation will use indices into this array.
*/
- struct drm_gem_dma_object **bo;
+ struct drm_gem_object **bo;
uint32_t bo_count;
/* List of BOs that are being written by the RCL. Other than
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 628d40ff3aa1..03648f954985 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -199,7 +199,7 @@ vc4_save_hang_state(struct drm_device *dev)
continue;
for (j = 0; j < exec[i]->bo_count; j++) {
- bo = to_vc4_bo(&exec[i]->bo[j]->base);
+ bo = to_vc4_bo(exec[i]->bo[j]);
/* Retain BOs just in case they were marked purgeable.
* This prevents the BO from being purged before
@@ -207,8 +207,8 @@ vc4_save_hang_state(struct drm_device *dev)
*/
WARN_ON(!refcount_read(&bo->usecnt));
refcount_inc(&bo->usecnt);
- drm_gem_object_get(&exec[i]->bo[j]->base);
- kernel_state->bo[k++] = &exec[i]->bo[j]->base;
+ drm_gem_object_get(exec[i]->bo[j]);
+ kernel_state->bo[k++] = exec[i]->bo[j];
}
list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
@@ -558,7 +558,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
unsigned i;
for (i = 0; i < exec->bo_count; i++) {
- bo = to_vc4_bo(&exec->bo[i]->base);
+ bo = to_vc4_bo(exec->bo[i]);
bo->seqno = seqno;
dma_resv_add_fence(bo->base.base.resv, exec->fence,
@@ -585,11 +585,8 @@ vc4_unlock_bo_reservations(struct drm_device *dev,
{
int i;
- for (i = 0; i < exec->bo_count; i++) {
- struct drm_gem_object *bo = &exec->bo[i]->base;
-
- dma_resv_unlock(bo->resv);
- }
+ for (i = 0; i < exec->bo_count; i++)
+ dma_resv_unlock(exec->bo[i]->resv);
ww_acquire_fini(acquire_ctx);
}
@@ -614,7 +611,7 @@ vc4_lock_bo_reservations(struct drm_device *dev,
retry:
if (contended_lock != -1) {
- bo = &exec->bo[contended_lock]->base;
+ bo = exec->bo[contended_lock];
ret = dma_resv_lock_slow_interruptible(bo->resv, acquire_ctx);
if (ret) {
ww_acquire_done(acquire_ctx);
@@ -626,19 +623,19 @@ retry:
if (i == contended_lock)
continue;
- bo = &exec->bo[i]->base;
+ bo = exec->bo[i];
ret = dma_resv_lock_interruptible(bo->resv, acquire_ctx);
if (ret) {
int j;
for (j = 0; j < i; j++) {
- bo = &exec->bo[j]->base;
+ bo = exec->bo[j];
dma_resv_unlock(bo->resv);
}
if (contended_lock != -1 && contended_lock >= i) {
- bo = &exec->bo[contended_lock]->base;
+ bo = exec->bo[contended_lock];
dma_resv_unlock(bo->resv);
}
@@ -659,7 +656,7 @@ retry:
* before we commit the CL to the hardware.
*/
for (i = 0; i < exec->bo_count; i++) {
- bo = &exec->bo[i]->base;
+ bo = exec->bo[i];
ret = dma_resv_reserve_fences(bo->resv, 1);
if (ret) {
@@ -749,7 +746,6 @@ vc4_cl_lookup_bos(struct drm_device *dev,
struct vc4_exec_info *exec)
{
struct drm_vc4_submit_cl *args = exec->args;
- uint32_t *handles;
int ret = 0;
int i;
@@ -763,54 +759,18 @@ vc4_cl_lookup_bos(struct drm_device *dev,
return -EINVAL;
}
- exec->bo = kvmalloc_array(exec->bo_count,
- sizeof(struct drm_gem_dma_object *),
- GFP_KERNEL | __GFP_ZERO);
- if (!exec->bo) {
- DRM_ERROR("Failed to allocate validated BO pointers\n");
- return -ENOMEM;
- }
-
- handles = kvmalloc_array(exec->bo_count, sizeof(uint32_t), GFP_KERNEL);
- if (!handles) {
- ret = -ENOMEM;
- DRM_ERROR("Failed to allocate incoming GEM handles\n");
- goto fail;
- }
-
- if (copy_from_user(handles, u64_to_user_ptr(args->bo_handles),
- exec->bo_count * sizeof(uint32_t))) {
- ret = -EFAULT;
- DRM_ERROR("Failed to copy in GEM handles\n");
- goto fail;
- }
-
- spin_lock(&file_priv->table_lock);
- for (i = 0; i < exec->bo_count; i++) {
- struct drm_gem_object *bo = idr_find(&file_priv->object_idr,
- handles[i]);
- if (!bo) {
- DRM_DEBUG("Failed to look up GEM BO %d: %d\n",
- i, handles[i]);
- ret = -EINVAL;
- break;
- }
-
- drm_gem_object_get(bo);
- exec->bo[i] = (struct drm_gem_dma_object *)bo;
- }
- spin_unlock(&file_priv->table_lock);
+ ret = drm_gem_objects_lookup(file_priv, u64_to_user_ptr(args->bo_handles),
+ exec->bo_count, &exec->bo);
if (ret)
goto fail_put_bo;
for (i = 0; i < exec->bo_count; i++) {
- ret = vc4_bo_inc_usecnt(to_vc4_bo(&exec->bo[i]->base));
+ ret = vc4_bo_inc_usecnt(to_vc4_bo(exec->bo[i]));
if (ret)
goto fail_dec_usecnt;
}
- kvfree(handles);
return 0;
fail_dec_usecnt:
@@ -823,15 +783,13 @@ fail_dec_usecnt:
* step.
*/
for (i-- ; i >= 0; i--)
- vc4_bo_dec_usecnt(to_vc4_bo(&exec->bo[i]->base));
+ vc4_bo_dec_usecnt(to_vc4_bo(exec->bo[i]));
fail_put_bo:
/* Release any reference to acquired objects. */
for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
- drm_gem_object_put(&exec->bo[i]->base);
+ drm_gem_object_put(exec->bo[i]);
-fail:
- kvfree(handles);
kvfree(exec->bo);
exec->bo = NULL;
return ret;
@@ -974,10 +932,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
if (exec->bo) {
for (i = 0; i < exec->bo_count; i++) {
- struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
+ struct vc4_bo *bo = to_vc4_bo(exec->bo[i]);
vc4_bo_dec_usecnt(bo);
- drm_gem_object_put(&exec->bo[i]->base);
+ drm_gem_object_put(exec->bo[i]);
}
kvfree(exec->bo);
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index ea22c9bf223a..d30e4547b4c5 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1466,6 +1466,12 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
if (!drm_dev_enter(drm, &idx))
goto out;
+ ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
+ if (ret < 0) {
+ DRM_ERROR("Failed to retain power domain: %d\n", ret);
+ goto err_dev_exit;
+ }
+
/*
* As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
* be faster than pixel clock, infinitesimally faster, tested in
@@ -1482,17 +1488,13 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder,
* Additionally, the AXI clock needs to be at least 25% of
* pixel clock, but HSM ends up being the limiting factor.
*/
- hsm_rate = max_t(unsigned long, 120000000, (tmds_char_rate / 100) * 101);
+ hsm_rate = max_t(unsigned long,
+ HSM_MIN_CLOCK_FREQ,
+ (tmds_char_rate / 100) * 101);
ret = clk_set_min_rate(vc4_hdmi->hsm_clock, hsm_rate);
if (ret) {
DRM_ERROR("Failed to set HSM clock rate: %d\n", ret);
- goto err_dev_exit;
- }
-
- ret = pm_runtime_resume_and_get(&vc4_hdmi->pdev->dev);
- if (ret < 0) {
- DRM_ERROR("Failed to retain power domain: %d\n", ret);
- goto err_dev_exit;
+ goto err_put_runtime_pm;
}
ret = clk_set_rate(vc4_hdmi->pixel_clock, tmds_char_rate);
@@ -3188,16 +3190,9 @@ static int vc4_hdmi_init_resources(struct drm_device *drm,
DRM_ERROR("Failed to get HDMI state machine clock\n");
return PTR_ERR(vc4_hdmi->hsm_clock);
}
-
vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock;
vc4_hdmi->cec_clock = vc4_hdmi->hsm_clock;
- vc4_hdmi->hsm_rpm_clock = devm_clk_get(dev, "hdmi");
- if (IS_ERR(vc4_hdmi->hsm_rpm_clock)) {
- DRM_ERROR("Failed to get HDMI state machine clock\n");
- return PTR_ERR(vc4_hdmi->hsm_rpm_clock);
- }
-
return 0;
}
@@ -3280,12 +3275,6 @@ static int vc5_hdmi_init_resources(struct drm_device *drm,
return PTR_ERR(vc4_hdmi->hsm_clock);
}
- vc4_hdmi->hsm_rpm_clock = devm_clk_get(dev, "hdmi");
- if (IS_ERR(vc4_hdmi->hsm_rpm_clock)) {
- DRM_ERROR("Failed to get HDMI state machine clock\n");
- return PTR_ERR(vc4_hdmi->hsm_rpm_clock);
- }
-
vc4_hdmi->pixel_bvb_clock = devm_clk_get(dev, "bvb");
if (IS_ERR(vc4_hdmi->pixel_bvb_clock)) {
DRM_ERROR("Failed to get pixel bvb clock\n");
@@ -3349,7 +3338,7 @@ static int vc4_hdmi_runtime_suspend(struct device *dev)
{
struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev);
- clk_disable_unprepare(vc4_hdmi->hsm_rpm_clock);
+ clk_disable_unprepare(vc4_hdmi->hsm_clock);
return 0;
}
@@ -3362,16 +3351,7 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
unsigned long rate;
int ret;
- /*
- * The HSM clock is in the HDMI power domain, so we need to set
- * its frequency while the power domain is active so that it
- * keeps its rate.
- */
- ret = clk_set_min_rate(vc4_hdmi->hsm_rpm_clock, HSM_MIN_CLOCK_FREQ);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(vc4_hdmi->hsm_rpm_clock);
+ ret = clk_prepare_enable(vc4_hdmi->hsm_clock);
if (ret)
return ret;
@@ -3384,7 +3364,7 @@ static int vc4_hdmi_runtime_resume(struct device *dev)
* case, it will lead to a silent CPU stall. Let's make sure we
* prevent such a case.
*/
- rate = clk_get_rate(vc4_hdmi->hsm_rpm_clock);
+ rate = clk_get_rate(vc4_hdmi->hsm_clock);
if (!rate) {
ret = -EINVAL;
goto err_disable_clk;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index dc3ccd8002a0..e3619836ca17 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -164,7 +164,6 @@ struct vc4_hdmi {
struct clk *cec_clock;
struct clk *pixel_clock;
struct clk *hsm_clock;
- struct clk *hsm_rpm_clock;
struct clk *audio_clock;
struct clk *pixel_bvb_clock;
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 520231af4df9..7dff3ca5af6b 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -117,7 +117,7 @@ vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex)
hindex, exec->bo_count);
return NULL;
}
- obj = exec->bo[hindex];
+ obj = to_drm_gem_dma_obj(exec->bo[hindex]);
bo = to_vc4_bo(&obj->base);
if (bo->validated_shader) {
@@ -810,7 +810,7 @@ validate_gl_shader_rec(struct drm_device *dev,
return -EINVAL;
}
- bo[i] = exec->bo[src_handles[i]];
+ bo[i] = to_drm_gem_dma_obj(exec->bo[src_handles[i]]);
if (!bo[i])
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index c2a879734d40..e15754178395 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -249,4 +249,5 @@ void vgem_fence_close(struct vgem_file *vfile)
{
idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
idr_destroy(&vfile->fence_idr);
+ mutex_destroy(&vfile->fence_mutex);
}
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index 51ec7c3240c9..ea06ff2aa4b4 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -11,3 +11,14 @@ config DRM_VIRTIO_GPU
QEMU based VMMs (like KVM or Xen).
If unsure say M.
+
+config DRM_VIRTIO_GPU_KMS
+ bool "Virtio GPU driver modesetting support"
+ depends on DRM_VIRTIO_GPU
+ default y
+ help
+ Enable modesetting support for virtio GPU driver. This can be
+ disabled in cases where only "headless" usage of the GPU is
+ required.
+
+ If unsure, say Y.
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 9ea7611a9e0f..ad924a8502e9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -336,6 +336,9 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
{
int i, ret;
+ if (!vgdev->num_scanouts)
+ return 0;
+
ret = drmm_mode_config_init(vgdev->ddev);
if (ret)
return ret;
@@ -362,6 +365,9 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
{
int i;
+ if (!vgdev->num_scanouts)
+ return;
+
for (i = 0 ; i < vgdev->num_scanouts; ++i)
kfree(vgdev->outputs[i].edid);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index ae97b98750b6..add075681e18 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -172,6 +172,10 @@ MODULE_AUTHOR("Alon Levy");
DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
static const struct drm_driver driver = {
+ /*
+ * If KMS is disabled DRIVER_MODESET and DRIVER_ATOMIC are masked
+ * out via drm_device::driver_features:
+ */
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
.open = virtio_gpu_driver_open,
.postclose = virtio_gpu_driver_postclose,
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 27b7f14dae89..5a3b5aaed1f3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -43,11 +43,13 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
events_read, &events_read);
if (events_read & VIRTIO_GPU_EVENT_DISPLAY) {
- if (vgdev->has_edid)
- virtio_gpu_cmd_get_edids(vgdev);
- virtio_gpu_cmd_get_display_info(vgdev);
- virtio_gpu_notify(vgdev);
- drm_helper_hpd_irq_event(vgdev->ddev);
+ if (vgdev->num_scanouts) {
+ if (vgdev->has_edid)
+ virtio_gpu_cmd_get_edids(vgdev);
+ virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
+ drm_helper_hpd_irq_event(vgdev->ddev);
+ }
events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
}
virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config,
@@ -223,12 +225,15 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
num_scanouts, &num_scanouts);
vgdev->num_scanouts = min_t(uint32_t, num_scanouts,
VIRTIO_GPU_MAX_SCANOUTS);
- if (!vgdev->num_scanouts) {
- DRM_ERROR("num_scanouts is zero\n");
- ret = -EINVAL;
- goto err_scanouts;
+
+ if (!IS_ENABLED(CONFIG_DRM_VIRTIO_GPU_KMS) || !vgdev->num_scanouts) {
+ DRM_INFO("KMS disabled\n");
+ vgdev->num_scanouts = 0;
+ vgdev->has_edid = false;
+ dev->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+ } else {
+ DRM_INFO("number of scanouts: %d\n", num_scanouts);
}
- DRM_INFO("number of scanouts: %d\n", num_scanouts);
virtio_cread_le(vgdev->vdev, struct virtio_gpu_config,
num_capsets, &num_capsets);
@@ -244,12 +249,14 @@ int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev)
if (num_capsets)
virtio_gpu_get_capsets(vgdev, num_capsets);
- if (vgdev->has_edid)
- virtio_gpu_cmd_get_edids(vgdev);
- virtio_gpu_cmd_get_display_info(vgdev);
- virtio_gpu_notify(vgdev);
- wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
- 5 * HZ);
+ if (vgdev->num_scanouts) {
+ if (vgdev->has_edid)
+ virtio_gpu_cmd_get_edids(vgdev);
+ virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
+ wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
+ 5 * HZ);
+ }
return 0;
err_scanouts:
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index a04a9b20896d..e1accfc47edf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -923,8 +923,7 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
cmd_p->hdr.ctx_id = cpu_to_le32(id);
cmd_p->nlen = cpu_to_le32(nlen);
cmd_p->context_init = cpu_to_le32(context_init);
- strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
- cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
+ strscpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name));
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
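
strncpy() neither guarantees NUL termination nor reports truncation, which is why the old code needed the manual terminator; strscpy() always terminates and returns the copied length, or -E2BIG on truncation. A short contrast, assuming a destination buffer dst and source string name:

	char dst[8];
	ssize_t n;

	/* Old pattern: terminate by hand, since strncpy() may not. */
	strncpy(dst, name, sizeof(dst) - 1);
	dst[sizeof(dst) - 1] = '\0';

	/* New pattern: one call, always terminated, truncation visible. */
	n = strscpy(dst, name, sizeof(dst));
	if (n == -E2BIG)
		pr_debug("debug name truncated\n");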
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 2a644f035597..e94479d9cd5b 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
- vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
+ vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o \
vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 4dcf2eb7aa80..82094c137855 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2011-2018 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2011-2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,40 +26,31 @@
*
**************************************************************************/
-#include <drm/ttm/ttm_placement.h>
-
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
-#include "ttm_object.h"
-/**
- * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
- * vmw_buffer_object.
- *
- * @bo: Pointer to the TTM buffer object.
- * Return: Pointer to the struct vmw_buffer_object embedding the
- * TTM buffer object.
- */
-static struct vmw_buffer_object *
-vmw_buffer_object(struct ttm_buffer_object *bo)
+#include <drm/ttm/ttm_placement.h>
+
+static void vmw_bo_release(struct vmw_bo *vbo)
{
- return container_of(bo, struct vmw_buffer_object, base);
+ vmw_bo_unmap(vbo);
+ drm_gem_object_release(&vbo->tbo.base);
}
/**
- * bo_is_vmw - check if the buffer object is a &vmw_buffer_object
- * @bo: ttm buffer object to be checked
+ * vmw_bo_free - vmw_bo destructor
*
- * Uses destroy function associated with the object to determine if this is
- * a &vmw_buffer_object.
- *
- * Returns:
- * true if the object is of &vmw_buffer_object type, false if not.
+ * @bo: Pointer to the embedded struct ttm_buffer_object
*/
-static bool bo_is_vmw(struct ttm_buffer_object *bo)
+static void vmw_bo_free(struct ttm_buffer_object *bo)
{
- return bo->destroy == &vmw_bo_bo_free ||
- bo->destroy == &vmw_gem_destroy;
+ struct vmw_bo *vbo = to_vmw_bo(&bo->base);
+
+ WARN_ON(vbo->dirty);
+ WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
+ vmw_bo_release(vbo);
+ kfree(vbo);
}
/**
@@ -72,13 +63,13 @@ static bool bo_is_vmw(struct ttm_buffer_object *bo)
* Return: Zero on success, Negative error code on failure. In particular
* -ERESTARTSYS if interrupted by a signal
*/
-int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
- struct ttm_placement *placement,
- bool interruptible)
+static int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
+ struct vmw_bo *buf,
+ struct ttm_placement *placement,
+ bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
- struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_buffer_object *bo = &buf->tbo;
int ret;
vmw_execbuf_release_pinned_bo(dev_priv);
@@ -87,12 +78,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto err;
- if (buf->base.pin_count > 0)
- ret = ttm_resource_compat(bo->resource, placement)
- ? 0 : -EINVAL;
- else
- ret = ttm_bo_validate(bo, placement, &ctx);
-
+ ret = ttm_bo_validate(bo, placement, &ctx);
if (!ret)
vmw_bo_pin_reserved(buf, true);
@@ -115,11 +101,11 @@ err:
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
+ struct vmw_bo *buf,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
- struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_buffer_object *bo = &buf->tbo;
int ret;
vmw_execbuf_release_pinned_bo(dev_priv);
@@ -128,17 +114,17 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
goto err;
- if (buf->base.pin_count > 0) {
- ret = ttm_resource_compat(bo->resource, &vmw_vram_gmr_placement)
- ? 0 : -EINVAL;
- goto out_unreserve;
- }
-
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR);
+ ret = ttm_bo_validate(bo, &buf->placement, &ctx);
if (likely(ret == 0) || ret == -ERESTARTSYS)
goto out_unreserve;
- ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_VRAM);
+ ret = ttm_bo_validate(bo, &buf->placement, &ctx);
out_unreserve:
if (!ret)
@@ -163,7 +149,7 @@ err:
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
+ struct vmw_bo *buf,
bool interruptible)
{
return vmw_bo_pin_in_placement(dev_priv, buf, &vmw_vram_placement,
@@ -184,22 +170,13 @@ int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
+ struct vmw_bo *buf,
bool interruptible)
{
struct ttm_operation_ctx ctx = {interruptible, false };
- struct ttm_buffer_object *bo = &buf->base;
- struct ttm_placement placement;
- struct ttm_place place;
+ struct ttm_buffer_object *bo = &buf->tbo;
int ret = 0;
- place = vmw_vram_placement.placement[0];
- place.lpfn = PFN_UP(bo->resource->size);
- placement.num_placement = 1;
- placement.placement = &place;
- placement.num_busy_placement = 1;
- placement.busy_placement = &place;
-
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
@@ -213,16 +190,19 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
if (bo->resource->mem_type == TTM_PL_VRAM &&
bo->resource->start < PFN_UP(bo->resource->size) &&
bo->resource->start > 0 &&
- buf->base.pin_count == 0) {
+ buf->tbo.pin_count == 0) {
ctx.interruptible = false;
- (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_SYS,
+ VMW_BO_DOMAIN_SYS);
+ (void)ttm_bo_validate(bo, &buf->placement, &ctx);
}
- if (buf->base.pin_count > 0)
- ret = ttm_resource_compat(bo->resource, &placement)
- ? 0 : -EINVAL;
- else
- ret = ttm_bo_validate(bo, &placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_VRAM);
+ buf->places[0].lpfn = PFN_UP(bo->resource->size);
+ ret = ttm_bo_validate(bo, &buf->placement, &ctx);
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->resource->start != 0);
@@ -248,10 +228,10 @@ err_unlock:
* -ERESTARTSYS if interrupted by a signal
*/
int vmw_bo_unpin(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
+ struct vmw_bo *buf,
bool interruptible)
{
- struct ttm_buffer_object *bo = &buf->base;
+ struct ttm_buffer_object *bo = &buf->tbo;
int ret;
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
@@ -293,12 +273,12 @@ void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
* @pin: Whether to pin or unpin.
*
*/
-void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
+void vmw_bo_pin_reserved(struct vmw_bo *vbo, bool pin)
{
struct ttm_operation_ctx ctx = { false, true };
struct ttm_place pl;
struct ttm_placement placement;
- struct ttm_buffer_object *bo = &vbo->base;
+ struct ttm_buffer_object *bo = &vbo->tbo;
uint32_t old_mem_type = bo->resource->mem_type;
int ret;
@@ -341,9 +321,9 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
* 3) Buffer object destruction
*
*/
-void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
+void *vmw_bo_map_and_cache(struct vmw_bo *vbo)
{
- struct ttm_buffer_object *bo = &vbo->base;
+ struct ttm_buffer_object *bo = &vbo->tbo;
bool not_used;
void *virtual;
int ret;
@@ -366,96 +346,70 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
* @vbo: The buffer object whose map we are tearing down.
*
* This function tears down a cached map set up using
- * vmw_buffer_object_map_and_cache().
+ * vmw_bo_map_and_cache().
*/
-void vmw_bo_unmap(struct vmw_buffer_object *vbo)
+void vmw_bo_unmap(struct vmw_bo *vbo)
{
if (vbo->map.bo == NULL)
return;
ttm_bo_kunmap(&vbo->map);
+ vbo->map.bo = NULL;
}
/**
- * vmw_bo_bo_free - vmw buffer object destructor
- *
- * @bo: Pointer to the embedded struct ttm_buffer_object
- */
-void vmw_bo_bo_free(struct ttm_buffer_object *bo)
-{
- struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
-
- WARN_ON(vmw_bo->dirty);
- WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
- vmw_bo_unmap(vmw_bo);
- drm_gem_object_release(&bo->base);
- kfree(vmw_bo);
-}
-
-/* default destructor */
-static void vmw_bo_default_destroy(struct ttm_buffer_object *bo)
-{
- kfree(bo);
-}
-
-/**
- * vmw_bo_create_kernel - Create a pinned BO for internal kernel use.
+ * vmw_bo_init - Initialize a vmw buffer object
*
* @dev_priv: Pointer to the device private struct
- * @size: size of the BO we need
- * @placement: where to put it
- * @p_bo: resulting BO
+ * @vmw_bo: Buffer object to initialize
+ * @params: Parameters used to initialize the buffer object
+ * @destroy: The function used to delete the buffer object
+ * Returns: Zero on success, negative error code on error.
*
- * Creates and pin a simple BO for in kernel use.
*/
-int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
- struct ttm_placement *placement,
- struct ttm_buffer_object **p_bo)
+static int vmw_bo_init(struct vmw_private *dev_priv,
+ struct vmw_bo *vmw_bo,
+ struct vmw_bo_params *params,
+ void (*destroy)(struct ttm_buffer_object *))
{
struct ttm_operation_ctx ctx = {
- .interruptible = false,
+ .interruptible = params->bo_type != ttm_bo_type_kernel,
.no_wait_gpu = false
};
- struct ttm_buffer_object *bo;
+ struct ttm_device *bdev = &dev_priv->bdev;
struct drm_device *vdev = &dev_priv->drm;
int ret;
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (unlikely(!bo))
- return -ENOMEM;
+ memset(vmw_bo, 0, sizeof(*vmw_bo));
- size = ALIGN(size, PAGE_SIZE);
+ BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
+ vmw_bo->tbo.priority = 3;
+ vmw_bo->res_tree = RB_ROOT;
- drm_gem_private_object_init(vdev, &bo->base, size);
+ params->size = ALIGN(params->size, PAGE_SIZE);
+ drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
- ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, ttm_bo_type_kernel,
- placement, 0, &ctx, NULL, NULL,
- vmw_bo_default_destroy);
+ vmw_bo_placement_set(vmw_bo, params->domain, params->busy_domain);
+ ret = ttm_bo_init_reserved(bdev, &vmw_bo->tbo, params->bo_type,
+ &vmw_bo->placement, 0, &ctx, NULL,
+ NULL, destroy);
if (unlikely(ret))
- goto error_free;
+ return ret;
- ttm_bo_pin(bo);
- ttm_bo_unreserve(bo);
- *p_bo = bo;
+ if (params->pin)
+ ttm_bo_pin(&vmw_bo->tbo);
+ ttm_bo_unreserve(&vmw_bo->tbo);
return 0;
-
-error_free:
- kfree(bo);
- return ret;
}
int vmw_bo_create(struct vmw_private *vmw,
- size_t size, struct ttm_placement *placement,
- bool interruptible, bool pin,
- void (*bo_free)(struct ttm_buffer_object *bo),
- struct vmw_buffer_object **p_bo)
+ struct vmw_bo_params *params,
+ struct vmw_bo **p_bo)
{
int ret;
- BUG_ON(!bo_free);
-
*p_bo = kmalloc(sizeof(**p_bo), GFP_KERNEL);
if (unlikely(!*p_bo)) {
DRM_ERROR("Failed to allocate a buffer.\n");
@@ -465,9 +419,7 @@ int vmw_bo_create(struct vmw_private *vmw,
/*
* vmw_bo_init will delete the *p_bo object if it fails
*/
- ret = vmw_bo_init(vmw, *p_bo, size,
- placement, interruptible, pin,
- bo_free);
+ ret = vmw_bo_init(vmw, *p_bo, params, vmw_bo_free);
if (unlikely(ret != 0))
goto out_error;
@@ -478,57 +430,7 @@ out_error:
}
/**
- * vmw_bo_init - Initialize a vmw buffer object
- *
- * @dev_priv: Pointer to the device private struct
- * @vmw_bo: Pointer to the struct vmw_buffer_object to initialize.
- * @size: Buffer object size in bytes.
- * @placement: Initial placement.
- * @interruptible: Whether waits should be performed interruptible.
- * @pin: If the BO should be created pinned at a fixed location.
- * @bo_free: The buffer object destructor.
- * Returns: Zero on success, negative error code on error.
- *
- * Note that on error, the code will free the buffer object.
- */
-int vmw_bo_init(struct vmw_private *dev_priv,
- struct vmw_buffer_object *vmw_bo,
- size_t size, struct ttm_placement *placement,
- bool interruptible, bool pin,
- void (*bo_free)(struct ttm_buffer_object *bo))
-{
- struct ttm_operation_ctx ctx = {
- .interruptible = interruptible,
- .no_wait_gpu = false
- };
- struct ttm_device *bdev = &dev_priv->bdev;
- struct drm_device *vdev = &dev_priv->drm;
- int ret;
-
- WARN_ON_ONCE(!bo_free);
- memset(vmw_bo, 0, sizeof(*vmw_bo));
- BUILD_BUG_ON(TTM_MAX_BO_PRIORITY <= 3);
- vmw_bo->base.priority = 3;
- vmw_bo->res_tree = RB_ROOT;
-
- size = ALIGN(size, PAGE_SIZE);
- drm_gem_private_object_init(vdev, &vmw_bo->base.base, size);
-
- ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, ttm_bo_type_device,
- placement, 0, &ctx, NULL, NULL, bo_free);
- if (unlikely(ret)) {
- return ret;
- }
-
- if (pin)
- ttm_bo_pin(&vmw_bo->base);
- ttm_bo_unreserve(&vmw_bo->base);
-
- return 0;
-}
-
-/**
- * vmw_user_bo_synccpu_grab - Grab a struct vmw_buffer_object for cpu
+ * vmw_user_bo_synccpu_grab - Grab a struct vmw_bo for cpu
* access, idling previous GPU operations on the buffer and optionally
* blocking it for further command submissions.
*
@@ -541,11 +443,11 @@ int vmw_bo_init(struct vmw_private *dev_priv,
*
* A blocking grab will be automatically released when @tfile is closed.
*/
-static int vmw_user_bo_synccpu_grab(struct vmw_buffer_object *vmw_bo,
+static int vmw_user_bo_synccpu_grab(struct vmw_bo *vmw_bo,
uint32_t flags)
{
bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
- struct ttm_buffer_object *bo = &vmw_bo->base;
+ struct ttm_buffer_object *bo = &vmw_bo->tbo;
int ret;
if (flags & drm_vmw_synccpu_allow_cs) {
@@ -588,17 +490,17 @@ static int vmw_user_bo_synccpu_release(struct drm_file *filp,
uint32_t handle,
uint32_t flags)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
int ret = vmw_user_bo_lookup(filp, handle, &vmw_bo);
if (!ret) {
if (!(flags & drm_vmw_synccpu_allow_cs)) {
atomic_dec(&vmw_bo->cpu_writers);
}
- ttm_bo_put(&vmw_bo->base);
+ ttm_bo_put(&vmw_bo->tbo);
}
- drm_gem_object_put(&vmw_bo->base.base);
+ drm_gem_object_put(&vmw_bo->tbo.base);
return ret;
}
@@ -620,7 +522,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
{
struct drm_vmw_synccpu_arg *arg =
(struct drm_vmw_synccpu_arg *) data;
- struct vmw_buffer_object *vbo;
+ struct vmw_bo *vbo;
int ret;
if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -639,7 +541,7 @@ int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
ret = vmw_user_bo_synccpu_grab(vbo, arg->flags);
vmw_bo_unreference(&vbo);
- drm_gem_object_put(&vbo->base.base);
+ drm_gem_object_put(&vbo->tbo.base);
if (unlikely(ret != 0)) {
if (ret == -ERESTARTSYS || ret == -EBUSY)
return -EBUSY;
@@ -683,8 +585,7 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
struct drm_vmw_unref_dmabuf_arg *arg =
(struct drm_vmw_unref_dmabuf_arg *)data;
- drm_gem_handle_delete(file_priv, arg->handle);
- return 0;
+ return drm_gem_handle_delete(file_priv, arg->handle);
}
@@ -694,14 +595,14 @@ int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
* @filp: The file the handle is registered with.
* @handle: The user buffer object handle
 * @out: Pointer to where a pointer to the embedded
- * struct vmw_buffer_object should be placed.
+ * struct vmw_bo should be placed.
* Return: Zero on success, Negative error code on error.
*
* The vmw buffer object pointer will be refcounted (both ttm and gem)
*/
int vmw_user_bo_lookup(struct drm_file *filp,
- uint32_t handle,
- struct vmw_buffer_object **out)
+ u32 handle,
+ struct vmw_bo **out)
{
struct drm_gem_object *gobj;
@@ -712,8 +613,8 @@ int vmw_user_bo_lookup(struct drm_file *filp,
return -ESRCH;
}
- *out = gem_to_vmw_bo(gobj);
- ttm_bo_get(&(*out)->base);
+ *out = to_vmw_bo(gobj);
+ ttm_bo_get(&(*out)->tbo);
return 0;
}
@@ -734,8 +635,7 @@ void vmw_bo_fence_single(struct ttm_buffer_object *bo,
struct vmw_fence_obj *fence)
{
struct ttm_device *bdev = bo->bdev;
- struct vmw_private *dev_priv =
- container_of(bdev, struct vmw_private, bdev);
+ struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
int ret;
if (fence == NULL)
@@ -771,7 +671,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_buffer_object *vbo;
+ struct vmw_bo *vbo;
int cpp = DIV_ROUND_UP(args->bpp, 8);
int ret;
@@ -795,7 +695,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
args->size, &args->handle,
&vbo);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put(&vbo->base.base);
+ drm_gem_object_put(&vbo->tbo.base);
return ret;
}
@@ -806,12 +706,8 @@ int vmw_dumb_create(struct drm_file *file_priv,
*/
void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
{
- /* Is @bo embedded in a struct vmw_buffer_object? */
- if (!bo_is_vmw(bo))
- return;
-
/* Kill any cached kernel maps before swapout */
- vmw_bo_unmap(vmw_buffer_object(bo));
+ vmw_bo_unmap(to_vmw_bo(&bo->base));
}
@@ -828,13 +724,7 @@ void vmw_bo_swap_notify(struct ttm_buffer_object *bo)
void vmw_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *mem)
{
- struct vmw_buffer_object *vbo;
-
- /* Make sure @bo is embedded in a struct vmw_buffer_object? */
- if (!bo_is_vmw(bo))
- return;
-
- vbo = container_of(bo, struct vmw_buffer_object, base);
+ struct vmw_bo *vbo = to_vmw_bo(&bo->base);
/*
* Kill any cached kernel maps before move to or from VRAM.
@@ -852,3 +742,98 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
+
+static u32
+set_placement_list(struct ttm_place *pl, u32 domain)
+{
+ u32 n = 0;
+
+ /*
+ * The placements are ordered according to our preferences
+ */
+ if (domain & VMW_BO_DOMAIN_MOB) {
+ pl[n].mem_type = VMW_PL_MOB;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+ if (domain & VMW_BO_DOMAIN_GMR) {
+ pl[n].mem_type = VMW_PL_GMR;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+ if (domain & VMW_BO_DOMAIN_VRAM) {
+ pl[n].mem_type = TTM_PL_VRAM;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+ if (domain & VMW_BO_DOMAIN_WAITABLE_SYS) {
+ pl[n].mem_type = VMW_PL_SYSTEM;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+ if (domain & VMW_BO_DOMAIN_SYS) {
+ pl[n].mem_type = TTM_PL_SYSTEM;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+
+ WARN_ON(!n);
+ if (!n) {
+ pl[n].mem_type = TTM_PL_SYSTEM;
+ pl[n].flags = 0;
+ pl[n].fpfn = 0;
+ pl[n].lpfn = 0;
+ n++;
+ }
+ return n;
+}
+
+void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain)
+{
+ struct ttm_device *bdev = bo->tbo.bdev;
+ struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
+ struct ttm_placement *pl = &bo->placement;
+ bool mem_compatible = false;
+ u32 i;
+
+ pl->placement = bo->places;
+ pl->num_placement = set_placement_list(bo->places, domain);
+
+ if (drm_debug_enabled(DRM_UT_DRIVER) && bo->tbo.resource) {
+ for (i = 0; i < pl->num_placement; ++i) {
+ if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM ||
+ bo->tbo.resource->mem_type == pl->placement[i].mem_type)
+ mem_compatible = true;
+ }
+ if (!mem_compatible)
+ drm_warn(&vmw->drm,
+ "%s: Incompatible transition from "
+ "bo->base.resource->mem_type = %u to domain = %u\n",
+ __func__, bo->tbo.resource->mem_type, domain);
+ }
+
+ pl->busy_placement = bo->busy_places;
+ pl->num_busy_placement = set_placement_list(bo->busy_places, busy_domain);
+}
+
+void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo)
+{
+ struct ttm_device *bdev = bo->tbo.bdev;
+ struct vmw_private *vmw = vmw_priv_from_ttm(bdev);
+ u32 domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM;
+
+ if (vmw->has_mob)
+ domain = VMW_BO_DOMAIN_MOB;
+
+ vmw_bo_placement_set(bo, domain, domain);
+}
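
With placements now derived from domain masks, callers express their preference as bits and let set_placement_list() expand them in the fixed MOB > GMR > VRAM > waitable-sys > sys order shown above. A hedged usage sketch mirroring the pin helpers in this file (reservation and error handling abridged):

	struct ttm_operation_ctx ctx = { .interruptible = true };
	int ret;

	/* Prefer GMR, fall back to VRAM; under pressure accept GMR only. */
	vmw_bo_placement_set(buf,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR);
	ret = ttm_bo_validate(&buf->tbo, &buf->placement, &ctx);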
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
new file mode 100644
index 000000000000..50a836e70994
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2023 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#ifndef VMWGFX_BO_H
+#define VMWGFX_BO_H
+
+#include "device_include/svga_reg.h"
+
+#include <drm/ttm/ttm_bo.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include <linux/rbtree_types.h>
+#include <linux/types.h>
+
+struct vmw_bo_dirty;
+struct vmw_fence_obj;
+struct vmw_private;
+struct vmw_resource;
+
+enum vmw_bo_domain {
+ VMW_BO_DOMAIN_SYS = BIT(0),
+ VMW_BO_DOMAIN_WAITABLE_SYS = BIT(1),
+ VMW_BO_DOMAIN_VRAM = BIT(2),
+ VMW_BO_DOMAIN_GMR = BIT(3),
+ VMW_BO_DOMAIN_MOB = BIT(4),
+};
+
+struct vmw_bo_params {
+ u32 domain;
+ u32 busy_domain;
+ enum ttm_bo_type bo_type;
+ size_t size;
+ bool pin;
+};
+
+/**
+ * struct vmw_bo - TTM buffer object with vmwgfx additions
+ * @tbo: The TTM buffer object
+ * @placement: The preferred placement for this buffer object
+ * @places: The chosen places for the preferred placement.
+ * @busy_places: Chosen busy places for the preferred placement
+ * @map: Kmap object for semi-persistent mappings
+ * @res_tree: RB tree of resources using this buffer object as a backing MOB
+ * @res_prios: Eviction priority counts for attached resources
+ * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
+ * increased. May be decreased without reservation.
+ * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
+ * @dirty: structure for user-space dirty-tracking
+ */
+struct vmw_bo {
+ struct ttm_buffer_object tbo;
+
+ struct ttm_placement placement;
+ struct ttm_place places[5];
+ struct ttm_place busy_places[5];
+
+ /* Protected by reservation */
+ struct ttm_bo_kmap_obj map;
+
+ struct rb_root res_tree;
+ u32 res_prios[TTM_MAX_BO_PRIORITY];
+
+ atomic_t cpu_writers;
+ /* Not ref-counted. Protected by binding_mutex */
+ struct vmw_resource *dx_query_ctx;
+ struct vmw_bo_dirty *dirty;
+};
+
+void vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain);
+void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo);
+
+int vmw_bo_create(struct vmw_private *dev_priv,
+ struct vmw_bo_params *params,
+ struct vmw_bo **p_bo);
+
+int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
+ struct vmw_bo *buf,
+ bool interruptible);
+int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
+ struct vmw_bo *buf,
+ bool interruptible);
+int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
+ struct vmw_bo *bo,
+ bool interruptible);
+void vmw_bo_pin_reserved(struct vmw_bo *bo, bool pin);
+int vmw_bo_unpin(struct vmw_private *vmw_priv,
+ struct vmw_bo *bo,
+ bool interruptible);
+
+void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
+ SVGAGuestPtr *ptr);
+int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void vmw_bo_fence_single(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence);
+
+void *vmw_bo_map_and_cache(struct vmw_bo *vbo);
+void vmw_bo_unmap(struct vmw_bo *vbo);
+
+void vmw_bo_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_resource *mem);
+void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
+
+int vmw_user_bo_lookup(struct drm_file *filp,
+ u32 handle,
+ struct vmw_bo **out);
+/**
+ * vmw_bo_prio_adjust - Adjust the buffer object eviction priority
+ * according to attached resources
+ * @vbo: The struct vmw_bo
+ */
+static inline void vmw_bo_prio_adjust(struct vmw_bo *vbo)
+{
+ int i = ARRAY_SIZE(vbo->res_prios);
+
+ while (i--) {
+ if (vbo->res_prios[i]) {
+ vbo->tbo.priority = i;
+ return;
+ }
+ }
+
+ vbo->tbo.priority = 3;
+}
+
+/**
+ * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
+ * eviction priority
+ * @vbo: The struct vmw_bo
+ * @prio: The resource priority
+ *
+ * After being notified, the code assigns the highest resource eviction priority
+ * to the backing buffer object (mob).
+ */
+static inline void vmw_bo_prio_add(struct vmw_bo *vbo, int prio)
+{
+ if (vbo->res_prios[prio]++ == 0)
+ vmw_bo_prio_adjust(vbo);
+}
+
+/**
+ * vmw_bo_used_prio_del - Notify a buffer object of a resource with a certain
+ * priority being removed
+ * @vbo: The struct vmw_bo
+ * @prio: The resource priority
+ *
+ * After being notified, the code assigns the highest resource eviction priority
+ * to the backing buffer object (mob).
+ */
+static inline void vmw_bo_prio_del(struct vmw_bo *vbo, int prio)
+{
+ if (--vbo->res_prios[prio] == 0)
+ vmw_bo_prio_adjust(vbo);
+}
+
+static inline void vmw_bo_unreference(struct vmw_bo **buf)
+{
+ struct vmw_bo *tmp_buf = *buf;
+
+ *buf = NULL;
+ if (tmp_buf)
+ ttm_bo_put(&tmp_buf->tbo);
+}
+
+static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf)
+{
+ ttm_bo_get(&buf->tbo);
+ return buf;
+}
+
+static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
+{
+ return container_of((gobj), struct vmw_bo, tbo.base);
+}
+
+#endif // VMWGFX_BO_H
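
Creation now funnels every knob through struct vmw_bo_params instead of a long argument list. A minimal usage sketch assembled from the initializers visible elsewhere in this patch (the size and domains are placeholders):

	struct vmw_bo_params params = {
		.domain      = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
		.busy_domain = VMW_BO_DOMAIN_GMR,
		.bo_type     = ttm_bo_type_device,
		.size        = PAGE_SIZE,	/* placeholder */
		.pin         = false,
	};
	struct vmw_bo *vbo;
	int ret;

	ret = vmw_bo_create(dev_priv, &params, &vbo);
	if (ret)
		return ret;
	/* ... use vbo->tbo ... */
	vmw_bo_unreference(&vbo);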
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index 162dfeb1cc5a..195ff8792e5a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2020 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -24,13 +24,13 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
-
-#include <linux/sched/signal.h>
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
+#include "vmwgfx_devcaps.h"
#include <drm/ttm/ttm_placement.h>
-#include "vmwgfx_drv.h"
-#include "vmwgfx_devcaps.h"
+#include <linux/sched/signal.h>
bool vmw_supports_3d(struct vmw_private *dev_priv)
{
@@ -567,7 +567,7 @@ static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
* without writing to the query result structure.
*/
- struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
+ struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForQuery body;
@@ -613,7 +613,7 @@ static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
* without writing to the query result structure.
*/
- struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
+ struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->tbo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdWaitForGBQuery body;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 2b843ff4b437..94e8982f5616 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2015-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,12 +25,13 @@
*
**************************************************************************/
-#include <linux/dmapool.h>
-#include <linux/pci.h>
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo.h>
-#include "vmwgfx_drv.h"
+#include <linux/dmapool.h>
+#include <linux/pci.h>
/*
* Size of inline command buffers. Try to make sure that a page size is a
@@ -79,7 +80,6 @@ struct vmw_cmdbuf_context {
* frees are protected by @lock.
* @cmd_space: Buffer object for the command buffer space, unless we were
 * able to make a contiguous coherent DMA memory allocation, @handle. Immutable.
- * @map_obj: Mapping state for @cmd_space. Immutable.
* @map: Pointer to command buffer space. May be a mapped buffer object or
 * a contiguous coherent DMA memory allocation. Immutable.
* @cur: Command buffer for small kernel command submissions. Protected by
@@ -116,8 +116,7 @@ struct vmw_cmdbuf_man {
struct vmw_cmdbuf_context ctx[SVGA_CB_CONTEXT_MAX];
struct list_head error;
struct drm_mm mm;
- struct ttm_buffer_object *cmd_space;
- struct ttm_bo_kmap_obj map_obj;
+ struct vmw_bo *cmd_space;
u8 *map;
struct vmw_cmdbuf_header *cur;
size_t cur_pos;
@@ -888,7 +887,7 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
header->cmd = man->map + offset;
if (man->using_mob) {
cb_hdr->flags = SVGA_CB_FLAG_MOB;
- cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
+ cb_hdr->ptr.mob.mobid = man->cmd_space->tbo.resource->start;
cb_hdr->ptr.mob.mobOffset = offset;
} else {
cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
@@ -1221,7 +1220,6 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context,
int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
{
struct vmw_private *dev_priv = man->dev_priv;
- bool dummy;
int ret;
if (man->has_pool)
@@ -1234,6 +1232,13 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
if (man->map) {
man->using_mob = false;
} else {
+ struct vmw_bo_params bo_params = {
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
+ .bo_type = ttm_bo_type_kernel,
+ .size = size,
+ .pin = true
+ };
/*
* DMA memory failed. If we can have command buffers in a
* MOB, try to use that instead. Note that this will
@@ -1244,19 +1249,12 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
!dev_priv->has_mob)
return -ENOMEM;
- ret = vmw_bo_create_kernel(dev_priv, size,
- &vmw_mob_placement,
- &man->cmd_space);
+ ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space);
if (ret)
return ret;
- man->using_mob = true;
- ret = ttm_bo_kmap(man->cmd_space, 0, size >> PAGE_SHIFT,
- &man->map_obj);
- if (ret)
- goto out_no_map;
-
- man->map = ttm_kmap_obj_virtual(&man->map_obj, &dummy);
+ man->map = vmw_bo_map_and_cache(man->cmd_space);
+ man->using_mob = man->map;
}
man->size = size;
@@ -1276,14 +1274,6 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size)
(man->using_mob) ? "MOB" : "DMA");
return 0;
-
-out_no_map:
- if (man->using_mob) {
- ttm_bo_put(man->cmd_space);
- man->cmd_space = NULL;
- }
-
- return ret;
}
/**
@@ -1382,14 +1372,11 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man)
man->has_pool = false;
man->default_size = VMW_CMDBUF_INLINE_SIZE;
(void) vmw_cmdbuf_idle(man, false, 10*HZ);
- if (man->using_mob) {
- (void) ttm_bo_kunmap(&man->map_obj);
- ttm_bo_put(man->cmd_space);
- man->cmd_space = NULL;
- } else {
+ if (man->using_mob)
+ vmw_bo_unreference(&man->cmd_space);
+ else
dma_free_coherent(man->dev_priv->drm.dev,
man->size, man->map, man->handle);
- }
}
/**
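
The pool setup keeps its two-tier strategy but now reuses the generic BO helpers: coherent DMA first, then a pinned MOB mapped through the cached-kmap path, which removes the hand-rolled map_obj bookkeeping. A simplified consolidation of the flow after this patch (error paths elided; the DMA allocation above the hunk is assumed to be dma_alloc_coherent()):

	man->map = dma_alloc_coherent(dev, size, &man->handle, GFP_KERNEL);
	if (man->map) {
		man->using_mob = false;		/* plain DMA pool */
	} else {
		ret = vmw_bo_create(dev_priv, &bo_params, &man->cmd_space);
		if (ret)
			return ret;
		man->map = vmw_bo_map_and_cache(man->cmd_space);
		man->using_mob = man->map;	/* MOB pool iff mapping worked */
	}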
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index e0f48cd9529b..ecc503e42790 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -27,9 +27,10 @@
#include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_binding.h"
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
-#include "vmwgfx_binding.h"
struct vmw_user_context {
struct ttm_base_object base;
@@ -38,7 +39,7 @@ struct vmw_user_context {
struct vmw_cmdbuf_res_manager *man;
struct vmw_resource *cotables[SVGA_COTABLE_MAX];
spinlock_t cotable_lock;
- struct vmw_buffer_object *dx_query_mob;
+ struct vmw_bo *dx_query_mob;
};
static void vmw_user_context_free(struct vmw_resource *res);
@@ -72,10 +73,11 @@ const struct vmw_user_resource_conv *user_context_converter =
static const struct vmw_res_func vmw_legacy_context_func = {
.res_type = vmw_res_context,
- .needs_backup = false,
+ .needs_guest_memory = false,
.may_evict = false,
.type_name = "legacy contexts",
- .backup_placement = NULL,
+ .domain = VMW_BO_DOMAIN_SYS,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
.create = NULL,
.destroy = NULL,
.bind = NULL,
@@ -84,12 +86,13 @@ static const struct vmw_res_func vmw_legacy_context_func = {
static const struct vmw_res_func vmw_gb_context_func = {
.res_type = vmw_res_context,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "guest backed contexts",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_gb_context_create,
.destroy = vmw_gb_context_destroy,
.bind = vmw_gb_context_bind,
@@ -98,12 +101,13 @@ static const struct vmw_res_func vmw_gb_context_func = {
static const struct vmw_res_func vmw_dx_context_func = {
.res_type = vmw_res_dx_context,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "dx contexts",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_dx_context_create,
.destroy = vmw_dx_context_destroy,
.bind = vmw_dx_context_bind,
@@ -182,7 +186,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
struct vmw_user_context *uctx =
container_of(res, struct vmw_user_context, res);
- res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) :
+ res->guest_memory_size = (dx ? sizeof(SVGADXContextMobFormat) :
sizeof(SVGAGBContextData));
ret = vmw_resource_init(dev_priv, res, true,
res_free,
@@ -354,8 +358,8 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
cmd->body.mobid = bo->resource->start;
- cmd->body.validContents = res->backup_dirty;
- res->backup_dirty = false;
+ cmd->body.validContents = res->guest_memory_dirty;
+ res->guest_memory_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -521,8 +525,8 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
cmd->body.mobid = bo->resource->start;
- cmd->body.validContents = res->backup_dirty;
- res->backup_dirty = false;
+ cmd->body.validContents = res->guest_memory_dirty;
+ res->guest_memory_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -853,7 +857,7 @@ vmw_context_binding_state(struct vmw_resource *ctx)
* specified in the parameter. 0 otherwise.
*/
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
- struct vmw_buffer_object *mob)
+ struct vmw_bo *mob)
{
struct vmw_user_context *uctx =
container_of(ctx_res, struct vmw_user_context, res);
@@ -885,7 +889,7 @@ int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
*
* @ctx_res: The context resource
*/
-struct vmw_buffer_object *
+struct vmw_bo *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
struct vmw_user_context *uctx =
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index b78a10312fad..c0b24d1cacbf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2014-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2014-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -30,13 +30,14 @@
* whenever the backing MOB is evicted.
*/
-#include <drm/ttm/ttm_placement.h>
-
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_mksstat.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
+#include <drm/ttm/ttm_placement.h>
+
/**
* struct vmw_cotable - Context Object Table resource
*
@@ -130,12 +131,13 @@ static int vmw_cotable_destroy(struct vmw_resource *res);
static const struct vmw_res_func vmw_cotable_func = {
.res_type = vmw_res_cotable,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "context guest backed object tables",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_cotable_create,
.destroy = vmw_cotable_destroy,
.bind = vmw_cotable_bind,
@@ -180,7 +182,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
{
struct vmw_cotable *vcotbl = vmw_cotable(res);
struct vmw_private *dev_priv = res->dev_priv;
- struct ttm_buffer_object *bo = &res->backup->base;
+ struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
struct {
SVGA3dCmdHeader header;
SVGA3dCmdDXSetCOTable body;
@@ -228,7 +230,7 @@ static int vmw_cotable_bind(struct vmw_resource *res,
* take the opportunity to correct the value here so that it's not
* misused in the future.
*/
- val_buf->bo = &res->backup->base;
+ val_buf->bo = &res->guest_memory_bo->tbo;
return vmw_cotable_unscrub(res);
}
@@ -289,7 +291,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
cmd0->body.cid = vcotbl->ctx->id;
cmd0->body.type = vcotbl->type;
cmd1 = (void *) &cmd0[1];
- vcotbl->size_read_back = res->backup_size;
+ vcotbl->size_read_back = res->guest_memory_size;
}
cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
cmd1->header.size = sizeof(cmd1->body);
@@ -371,12 +373,12 @@ static int vmw_cotable_readback(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = vcotbl->ctx->id;
cmd->body.type = vcotbl->type;
- vcotbl->size_read_back = res->backup_size;
+ vcotbl->size_read_back = res->guest_memory_size;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
}
(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
- vmw_bo_fence_single(&res->backup->base, fence);
+ vmw_bo_fence_single(&res->guest_memory_bo->tbo, fence);
vmw_fence_obj_unreference(&fence);
return 0;
@@ -399,14 +401,21 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
struct ttm_operation_ctx ctx = { false, false };
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_cotable *vcotbl = vmw_cotable(res);
- struct vmw_buffer_object *buf, *old_buf = res->backup;
- struct ttm_buffer_object *bo, *old_bo = &res->backup->base;
- size_t old_size = res->backup_size;
+ struct vmw_bo *buf, *old_buf = res->guest_memory_bo;
+ struct ttm_buffer_object *bo, *old_bo = &res->guest_memory_bo->tbo;
+ size_t old_size = res->guest_memory_size;
size_t old_size_read_back = vcotbl->size_read_back;
size_t cur_size_read_back;
struct ttm_bo_kmap_obj old_map, new_map;
int ret;
size_t i;
+ struct vmw_bo_params bo_params = {
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
+ .bo_type = ttm_bo_type_device,
+ .size = new_size,
+ .pin = true
+ };
MKS_STAT_TIME_DECL(MKSSTAT_KERN_COTABLE_RESIZE);
MKS_STAT_TIME_PUSH(MKSSTAT_KERN_COTABLE_RESIZE);
@@ -423,14 +432,13 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* for the new COTable. Initially pin the buffer object to make sure
* we can use tryreserve without failure.
*/
- ret = vmw_bo_create(dev_priv, new_size, &vmw_mob_placement,
- true, true, vmw_bo_bo_free, &buf);
+ ret = vmw_bo_create(dev_priv, &bo_params, &buf);
if (ret) {
DRM_ERROR("Failed initializing new cotable MOB.\n");
goto out_done;
}
- bo = &buf->base;
+ bo = &buf->tbo;
WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, NULL));
ret = ttm_bo_wait(old_bo, false, false);
@@ -464,15 +472,18 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
}
/* Unpin new buffer, and switch backup buffers. */
- ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_MOB,
+ VMW_BO_DOMAIN_MOB);
+ ret = ttm_bo_validate(bo, &buf->placement, &ctx);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed validating new COTable backup buffer.\n");
goto out_wait;
}
vmw_resource_mob_detach(res);
- res->backup = buf;
- res->backup_size = new_size;
+ res->guest_memory_bo = buf;
+ res->guest_memory_size = new_size;
vcotbl->size_read_back = cur_size_read_back;
/*
@@ -482,8 +493,8 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
ret = vmw_cotable_unscrub(res);
if (ret) {
DRM_ERROR("Failed switching COTable backup buffer.\n");
- res->backup = old_buf;
- res->backup_size = old_size;
+ res->guest_memory_bo = old_buf;
+ res->guest_memory_size = old_size;
vcotbl->size_read_back = old_size_read_back;
vmw_resource_mob_attach(res);
goto out_wait;
@@ -498,7 +509,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
if (unlikely(ret))
goto out_wait;
- /* Release the pin acquired in vmw_bo_init */
+ /* Release the pin acquired in vmw_bo_create */
ttm_bo_unpin(bo);
MKS_STAT_TIME_POP(MKSSTAT_KERN_COTABLE_RESIZE);
@@ -533,7 +544,7 @@ out_done:
static int vmw_cotable_create(struct vmw_resource *res)
{
struct vmw_cotable *vcotbl = vmw_cotable(res);
- size_t new_size = res->backup_size;
+ size_t new_size = res->guest_memory_size;
size_t needed_size;
int ret;
@@ -542,7 +553,7 @@ static int vmw_cotable_create(struct vmw_resource *res)
while (needed_size > new_size)
new_size *= 2;
- if (likely(new_size <= res->backup_size)) {
+ if (likely(new_size <= res->guest_memory_size)) {
if (vcotbl->scrubbed && vmw_resource_mob_attached(res)) {
ret = vmw_cotable_unscrub(res);
if (ret)
@@ -606,12 +617,12 @@ struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv,
INIT_LIST_HEAD(&vcotbl->resource_list);
vcotbl->res.id = type;
- vcotbl->res.backup_size = PAGE_SIZE;
+ vcotbl->res.guest_memory_size = PAGE_SIZE;
num_entries = PAGE_SIZE / co_info[type].size;
if (num_entries < co_info[type].min_initial_entries) {
- vcotbl->res.backup_size = co_info[type].min_initial_entries *
+ vcotbl->res.guest_memory_size = co_info[type].min_initial_entries *
co_info[type].size;
- vcotbl->res.backup_size = PFN_ALIGN(vcotbl->res.backup_size);
+ vcotbl->res.guest_memory_size = PFN_ALIGN(vcotbl->res.guest_memory_size);
}
vcotbl->scrubbed = true;
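
vmw_cotable_create() grows the table geometrically, so repeated overflow costs O(log n) reallocations rather than one per entry, with the old contents copied into the new MOB before the switch. The size computation above, restated (needed_size stands for the hypothetical entries-times-entry-size product):

	size_t new_size = res->guest_memory_size;

	while (needed_size > new_size)
		new_size *= 2;	/* double until the required entry fits */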
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9ad28346aff7..2588615a2a38 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -28,9 +28,10 @@
#include "vmwgfx_drv.h"
+#include "vmwgfx_bo.h"
+#include "vmwgfx_binding.h"
#include "vmwgfx_devcaps.h"
#include "vmwgfx_mksstat.h"
-#include "vmwgfx_binding.h"
#include "ttm_object.h"
#include <drm/drm_aperture.h>
@@ -386,27 +387,32 @@ static void vmw_print_sm_type(struct vmw_private *dev_priv)
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
int ret;
- struct vmw_buffer_object *vbo;
+ struct vmw_bo *vbo;
struct ttm_bo_kmap_obj map;
volatile SVGA3dQueryResult *result;
bool dummy;
+ struct vmw_bo_params bo_params = {
+ .domain = VMW_BO_DOMAIN_SYS,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
+ .bo_type = ttm_bo_type_kernel,
+ .size = PAGE_SIZE,
+ .pin = true
+ };
/*
* Create the vbo as pinned, so that a tryreserve will
* immediately succeed. This is because we're the only
* user of the bo currently.
*/
- ret = vmw_bo_create(dev_priv, PAGE_SIZE,
- &vmw_sys_placement, false, true,
- &vmw_bo_bo_free, &vbo);
+ ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
+ ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
BUG_ON(ret != 0);
vmw_bo_pin_reserved(vbo, true);
- ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
+ ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map);
if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
result->totalSize = sizeof(*result);
@@ -415,7 +421,7 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
ttm_bo_kunmap(&map);
}
vmw_bo_pin_reserved(vbo, false);
- ttm_bo_unreserve(&vbo->base);
+ ttm_bo_unreserve(&vbo->tbo);
if (unlikely(ret != 0)) {
DRM_ERROR("Dummy query buffer map failed.\n");
@@ -1565,7 +1571,7 @@ static const struct file_operations vmwgfx_driver_fops = {
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = vmw_unlocked_ioctl,
- .mmap = vmw_mmap,
+ .mmap = drm_gem_mmap,
.poll = drm_poll,
.read = drm_read,
#if defined(CONFIG_COMPAT)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 203fa32cd4c1..fb8f0c0642c0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -117,32 +117,6 @@ struct vmwgfx_hash_item {
unsigned long key;
};
-/**
- * struct vmw_buffer_object - TTM buffer object with vmwgfx additions
- * @base: The TTM buffer object
- * @res_tree: RB tree of resources using this buffer object as a backing MOB
- * @base_mapped_count: ttm BO mapping count; used by KMS atomic helpers.
- * @cpu_writers: Number of synccpu write grabs. Protected by reservation when
- * increased. May be decreased without reservation.
- * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
- * @map: Kmap object for semi-persistent mappings
- * @res_prios: Eviction priority counts for attached resources
- * @dirty: structure for user-space dirty-tracking
- */
-struct vmw_buffer_object {
- struct ttm_buffer_object base;
- struct rb_root res_tree;
- /* For KMS atomic helpers: ttm bo mapping count */
- atomic_t base_mapped_count;
-
- atomic_t cpu_writers;
- /* Not ref-counted. Protected by binding_mutex */
- struct vmw_resource *dx_query_ctx;
- /* Protected by reservation */
- struct ttm_bo_kmap_obj map;
- u32 res_prios[TTM_MAX_BO_PRIORITY];
- struct vmw_bo_dirty *dirty;
-};
/**
* struct vmw_validate_buffer - Carries validation info about buffers.
@@ -168,21 +142,23 @@ struct vmw_res_func;
* @kref: For refcounting.
* @dev_priv: Pointer to the device private for this resource. Immutable.
* @id: Device id. Protected by @dev_priv::resource_lock.
- * @backup_size: Backup buffer size. Immutable.
- * @res_dirty: Resource contains data not yet in the backup buffer. Protected
- * by resource reserved.
- * @backup_dirty: Backup buffer contains data not yet in the HW resource.
+ * @guest_memory_size: Guest memory buffer size. Immutable.
+ * @res_dirty: Resource contains data not yet in the guest memory buffer.
* Protected by resource reserved.
+ * @guest_memory_dirty: Guest memory buffer contains data not yet in the HW
+ * resource. Protected by resource reserved.
* @coherent: Emulate coherency by tracking vm accesses.
- * @backup: The backup buffer if any. Protected by resource reserved.
- * @backup_offset: Offset into the backup buffer if any. Protected by resource
- * reserved. Note that only a few resource types can have a @backup_offset
- * different from zero.
+ * @guest_memory_bo: The guest memory buffer if any. Protected by resource
+ * reserved.
+ * @guest_memory_offset: Offset into the guest memory buffer if any. Protected
+ * by resource reserved. Note that only a few resource types can have a
+ * @guest_memory_offset different from zero.
* @pin_count: The pin count for this resource. A pinned resource has a
* pin-count greater than zero. It is not on the resource LRU lists and its
- * backup buffer is pinned. Hence it can't be evicted.
+ * guest memory buffer is pinned. Hence it can't be evicted.
* @func: Method vtable for this resource. Immutable.
- * @mob_node; Node for the MOB backup rbtree. Protected by @backup reserved.
+ * @mob_node: Node for the MOB guest memory rbtree. Protected by
+ * @guest_memory_bo reserved.
* @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock.
* @binding_head: List head for the context binding list. Protected by
* the @dev_priv::binding_mutex
@@ -190,18 +166,20 @@ struct vmw_res_func;
* @hw_destroy: Callback to destroy the resource on the device, as part of
* resource destruction.
*/
+struct vmw_bo;
struct vmw_resource_dirty;
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
int id;
u32 used_prio;
- unsigned long backup_size;
+ unsigned long guest_memory_size;
u32 res_dirty : 1;
- u32 backup_dirty : 1;
+ u32 guest_memory_dirty : 1;
u32 coherent : 1;
- struct vmw_buffer_object *backup;
- unsigned long backup_offset;
+ struct vmw_bo *guest_memory_bo;
+ unsigned long guest_memory_offset;
unsigned long pin_count;
const struct vmw_res_func *func;
struct rb_node mob_node;
@@ -446,7 +424,7 @@ struct vmw_sw_context{
struct drm_file *filp;
uint32_t *cmd_bounce;
uint32_t cmd_bounce_size;
- struct vmw_buffer_object *cur_query_bo;
+ struct vmw_bo *cur_query_bo;
struct list_head bo_relocations;
struct list_head res_relocations;
uint32_t *buf_start;
@@ -458,7 +436,7 @@ struct vmw_sw_context{
struct list_head staged_cmd_res;
struct list_head ctx_list;
struct vmw_ctx_validation_info *dx_ctx_node;
- struct vmw_buffer_object *dx_query_mob;
+ struct vmw_bo *dx_query_mob;
struct vmw_resource *dx_query_ctx;
struct vmw_cmdbuf_res_manager *man;
struct vmw_validation_context *ctx;
@@ -492,7 +470,7 @@ struct vmw_otable_batch {
unsigned num_otables;
struct vmw_otable *otables;
struct vmw_resource *context;
- struct ttm_buffer_object *otable_bo;
+ struct vmw_bo *otable_bo;
};
enum {
@@ -632,8 +610,8 @@ struct vmw_private {
* are protected by the cmdbuf mutex.
*/
- struct vmw_buffer_object *dummy_query_bo;
- struct vmw_buffer_object *pinned_bo;
+ struct vmw_bo *dummy_query_bo;
+ struct vmw_bo *pinned_bo;
uint32_t query_cid;
uint32_t query_cid_valid;
bool dummy_query_bo_pinned;
@@ -677,11 +655,6 @@ struct vmw_private {
#endif
};
-static inline struct vmw_buffer_object *gem_to_vmw_bo(struct drm_gem_object *gobj)
-{
- return container_of((gobj), struct vmw_buffer_object, base.base);
-}
-
static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
{
return container_of(res, struct vmw_surface, res);
@@ -692,6 +665,11 @@ static inline struct vmw_private *vmw_priv(struct drm_device *dev)
return (struct vmw_private *)dev->dev_private;
}
+static inline struct vmw_private *vmw_priv_from_ttm(struct ttm_device *bdev)
+{
+ return container_of(bdev, struct vmw_private, bdev);
+}
+
static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
return (struct vmw_fpriv *)file_priv->driver_priv;
@@ -825,7 +803,7 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t handle,
struct vmw_surface **out_surf,
- struct vmw_buffer_object **out_buf);
+ struct vmw_bo **out_buf);
extern int vmw_user_resource_lookup_handle(
struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
@@ -844,20 +822,20 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
extern void vmw_resource_unreserve(struct vmw_resource *res,
bool dirty_set,
bool dirty,
- bool switch_backup,
- struct vmw_buffer_object *new_backup,
- unsigned long new_backup_offset);
+ bool switch_guest_memory,
+ struct vmw_bo *new_guest_memory,
+ unsigned long new_guest_memory_offset);
extern void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *old_mem,
struct ttm_resource *new_mem);
-extern int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob);
-extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
-extern void vmw_resource_unbind_list(struct vmw_buffer_object *vbo);
+int vmw_query_readback_all(struct vmw_bo *dx_query_mob);
+void vmw_resource_evict_all(struct vmw_private *dev_priv);
+void vmw_resource_unbind_list(struct vmw_bo *vbo);
void vmw_resource_mob_attach(struct vmw_resource *res);
void vmw_resource_mob_detach(struct vmw_resource *res);
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
pgoff_t end);
-int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
+int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
pgoff_t end, pgoff_t *num_prefault);
/**
@@ -872,117 +850,15 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
}
/**
- * Buffer object helper functions - vmwgfx_bo.c
- */
-extern int vmw_bo_pin_in_placement(struct vmw_private *vmw_priv,
- struct vmw_buffer_object *bo,
- struct ttm_placement *placement,
- bool interruptible);
-extern int vmw_bo_pin_in_vram(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
- bool interruptible);
-extern int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
- bool interruptible);
-extern int vmw_bo_pin_in_start_of_vram(struct vmw_private *vmw_priv,
- struct vmw_buffer_object *bo,
- bool interruptible);
-extern int vmw_bo_unpin(struct vmw_private *vmw_priv,
- struct vmw_buffer_object *bo,
- bool interruptible);
-extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
- SVGAGuestPtr *ptr);
-extern void vmw_bo_pin_reserved(struct vmw_buffer_object *bo, bool pin);
-extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
-extern int vmw_bo_create_kernel(struct vmw_private *dev_priv,
- unsigned long size,
- struct ttm_placement *placement,
- struct ttm_buffer_object **p_bo);
-extern int vmw_bo_create(struct vmw_private *dev_priv,
- size_t size, struct ttm_placement *placement,
- bool interruptible, bool pin,
- void (*bo_free)(struct ttm_buffer_object *bo),
- struct vmw_buffer_object **p_bo);
-extern int vmw_bo_init(struct vmw_private *dev_priv,
- struct vmw_buffer_object *vmw_bo,
- size_t size, struct ttm_placement *placement,
- bool interruptible, bool pin,
- void (*bo_free)(struct ttm_buffer_object *bo));
-extern int vmw_bo_unref_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_user_bo_synccpu_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int vmw_user_bo_lookup(struct drm_file *filp,
- uint32_t handle,
- struct vmw_buffer_object **out);
-extern void vmw_bo_fence_single(struct ttm_buffer_object *bo,
- struct vmw_fence_obj *fence);
-extern void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo);
-extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
-extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
- struct ttm_resource *mem);
-extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
-
-/**
- * vmw_bo_adjust_prio - Adjust the buffer object eviction priority
- * according to attached resources
- * @vbo: The struct vmw_buffer_object
- */
-static inline void vmw_bo_prio_adjust(struct vmw_buffer_object *vbo)
-{
- int i = ARRAY_SIZE(vbo->res_prios);
-
- while (i--) {
- if (vbo->res_prios[i]) {
- vbo->base.priority = i;
- return;
- }
- }
-
- vbo->base.priority = 3;
-}
-
-/**
- * vmw_bo_prio_add - Notify a buffer object of a newly attached resource
- * eviction priority
- * @vbo: The struct vmw_buffer_object
- * @prio: The resource priority
- *
- * After being notified, the code assigns the highest resource eviction priority
- * to the backing buffer object (mob).
- */
-static inline void vmw_bo_prio_add(struct vmw_buffer_object *vbo, int prio)
-{
- if (vbo->res_prios[prio]++ == 0)
- vmw_bo_prio_adjust(vbo);
-}
-
-/**
- * vmw_bo_prio_del - Notify a buffer object of a resource with a certain
- * priority being removed
- * @vbo: The struct vmw_buffer_object
- * @prio: The resource priority
- *
- * After being notified, the code assigns the highest resource eviction priority
- * to the backing buffer object (mob).
- */
-static inline void vmw_bo_prio_del(struct vmw_buffer_object *vbo, int prio)
-{
- if (--vbo->res_prios[prio] == 0)
- vmw_bo_prio_adjust(vbo);
-}
-
-/**
* GEM related functionality - vmwgfx_gem.c
*/
extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
uint32_t *handle,
- struct vmw_buffer_object **p_vbo);
+ struct vmw_bo **p_vbo);
extern int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
-extern void vmw_gem_destroy(struct ttm_buffer_object *bo);
extern void vmw_debugfs_gem_init(struct vmw_private *vdev);
/**
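
The hunk above drops the inline eviction-priority helpers from this header. The bookkeeping they implemented is worth spelling out: a counter per priority slot, with the buffer's effective priority tracking the highest populated slot and falling back to a default of 3 when nothing is attached. A rough, self-contained userspace model of that logic (the slot count of 4 is an assumption; the names are stand-ins, not the driver's API):

#include <assert.h>
#include <stdio.h>

#define NUM_PRIOS 4	/* assumed slot count; the default prio is the last slot */
#define DEFAULT_PRIO 3

struct model_bo {
	unsigned int res_prios[NUM_PRIOS];	/* attached resources per priority */
	unsigned int priority;			/* effective eviction priority */
};

/* Highest populated slot wins; fall back to the default when empty. */
static void model_prio_adjust(struct model_bo *bo)
{
	int i = NUM_PRIOS;

	while (i--) {
		if (bo->res_prios[i]) {
			bo->priority = i;
			return;
		}
	}
	bo->priority = DEFAULT_PRIO;
}

/* Only 0 -> 1 and 1 -> 0 transitions can change the winner. */
static void model_prio_add(struct model_bo *bo, int prio)
{
	if (bo->res_prios[prio]++ == 0)
		model_prio_adjust(bo);
}

static void model_prio_del(struct model_bo *bo, int prio)
{
	if (--bo->res_prios[prio] == 0)
		model_prio_adjust(bo);
}

int main(void)
{
	struct model_bo bo = { .priority = DEFAULT_PRIO };

	model_prio_add(&bo, 1);
	model_prio_add(&bo, 2);
	assert(bo.priority == 2);
	model_prio_del(&bo, 2);
	assert(bo.priority == 1);
	model_prio_del(&bo, 1);
	assert(bo.priority == DEFAULT_PRIO);
	printf("final priority: %u\n", bo.priority);
	return 0;
}

The transition checks mean the full scan only runs on the first attach or last detach at a given priority, which keeps the hot path cheap.
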
@@ -1056,29 +932,20 @@ vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
}
/**
- * TTM glue - vmwgfx_ttm_glue.c
- */
-
-extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
-
-/**
* TTM buffer object driver - vmwgfx_ttm_buffer.c
*/
extern const size_t vmw_tt_size;
extern struct ttm_placement vmw_vram_placement;
-extern struct ttm_placement vmw_vram_sys_placement;
extern struct ttm_placement vmw_vram_gmr_placement;
extern struct ttm_placement vmw_sys_placement;
-extern struct ttm_placement vmw_srf_placement;
-extern struct ttm_placement vmw_mob_placement;
-extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_device_funcs vmw_bo_driver;
extern const struct vmw_sg_table *
vmw_bo_sg_table(struct ttm_buffer_object *bo);
-extern int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
- unsigned long bo_size,
- struct ttm_buffer_object **bo_p);
+int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
+ size_t bo_size,
+ u32 domain,
+ struct vmw_bo **bo_p);
extern void vmw_piter_start(struct vmw_piter *viter,
const struct vmw_sg_table *vsgt,
@@ -1297,8 +1164,8 @@ vmw_context_binding_state(struct vmw_resource *ctx);
extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
bool readback);
extern int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
- struct vmw_buffer_object *mob);
-extern struct vmw_buffer_object *
+ struct vmw_bo *mob);
+extern struct vmw_bo *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
@@ -1523,12 +1390,12 @@ int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/* Resource dirtying - vmwgfx_page_dirty.c */
-void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo);
-int vmw_bo_dirty_add(struct vmw_buffer_object *vbo);
+void vmw_bo_dirty_scan(struct vmw_bo *vbo);
+int vmw_bo_dirty_add(struct vmw_bo *vbo);
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res);
void vmw_bo_dirty_clear_res(struct vmw_resource *res);
-void vmw_bo_dirty_release(struct vmw_buffer_object *vbo);
-void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
+void vmw_bo_dirty_release(struct vmw_bo *vbo);
+void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
pgoff_t start, pgoff_t end);
vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf);
vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf);
@@ -1561,22 +1428,6 @@ static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
return srf;
}
-static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
-{
- struct vmw_buffer_object *tmp_buf = *buf;
-
- *buf = NULL;
- if (tmp_buf != NULL)
- ttm_bo_put(&tmp_buf->base);
-}
-
-static inline struct vmw_buffer_object *
-vmw_bo_reference(struct vmw_buffer_object *buf)
-{
- ttm_bo_get(&buf->base);
- return buf;
-}
-
static inline void vmw_fifo_resource_inc(struct vmw_private *dev_priv)
{
atomic_inc(&dev_priv->num_fifo_resources);
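
Also removed from this header just above are vmw_bo_reference() and vmw_bo_unreference(). The detail worth noting from the removed unreference helper is that it clears the caller's pointer before dropping the reference, so no dangling pointer survives the put. A minimal sketch of the pattern, with a plain counter standing in for TTM's refcounting:

#include <assert.h>
#include <stdlib.h>

struct model_bo {
	int refcount;
};

static struct model_bo *model_bo_reference(struct model_bo *bo)
{
	bo->refcount++;
	return bo;
}

static void model_bo_unreference(struct model_bo **buf)
{
	struct model_bo *tmp = *buf;

	*buf = NULL;	/* clear the caller's pointer before the put */
	if (tmp && --tmp->refcount == 0)
		free(tmp);
}

int main(void)
{
	struct model_bo *bo = calloc(1, sizeof(*bo));
	struct model_bo *extra;

	if (!bo)
		return 1;
	bo->refcount = 1;
	extra = model_bo_reference(bo);
	model_bo_unreference(&extra);
	assert(extra == NULL);		/* pointer was cleared */
	model_bo_unreference(&bo);	/* last ref: object freed */
	return 0;
}

Because *buf is NULLed first, a second unreference through the same pointer degrades to a harmless no-op instead of an over-put.
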
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 0590bb22c73a..6b9aa2b4ef54 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009 - 2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -24,17 +24,17 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
-#include <linux/sync_file.h>
-#include <linux/hashtable.h>
-
+#include "vmwgfx_binding.h"
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
-#include "vmwgfx_reg.h"
+#include "vmwgfx_mksstat.h"
+#include "vmwgfx_so.h"
+
#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>
-#include "vmwgfx_so.h"
-#include "vmwgfx_binding.h"
-#include "vmwgfx_mksstat.h"
+#include <linux/sync_file.h>
+#include <linux/hashtable.h>
/*
* Helper macro to get dx_ctx_node if available otherwise print an error
@@ -65,7 +65,7 @@
*/
struct vmw_relocation {
struct list_head head;
- struct vmw_buffer_object *vbo;
+ struct vmw_bo *vbo;
union {
SVGAMobId *mob_loc;
SVGAGuestPtr *location;
@@ -149,7 +149,7 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
- struct vmw_buffer_object **vmw_bo_p);
+ struct vmw_bo **vmw_bo_p);
/**
* vmw_ptr_diff - Compute the offset from a to b in bytes
*
@@ -475,12 +475,16 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (has_sm4_context(dev_priv) &&
vmw_res_type(ctx) == vmw_res_dx_context) {
- struct vmw_buffer_object *dx_query_mob;
+ struct vmw_bo *dx_query_mob;
dx_query_mob = vmw_context_get_dx_query_mob(ctx);
- if (dx_query_mob)
+ if (dx_query_mob) {
+ vmw_bo_placement_set(dx_query_mob,
+ VMW_BO_DOMAIN_MOB,
+ VMW_BO_DOMAIN_MOB);
ret = vmw_validation_add_bo(sw_context->ctx,
- dx_query_mob, true, false);
+ dx_query_mob);
+ }
}
mutex_unlock(&dev_priv->binding_mutex);
@@ -596,7 +600,7 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
return ret;
if (sw_context->dx_query_mob) {
- struct vmw_buffer_object *expected_dx_query_mob;
+ struct vmw_bo *expected_dx_query_mob;
expected_dx_query_mob =
vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
@@ -703,7 +707,7 @@ res_check_done:
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
struct vmw_private *dev_priv = ctx_res->dev_priv;
- struct vmw_buffer_object *dx_query_mob;
+ struct vmw_bo *dx_query_mob;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
@@ -718,7 +722,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = ctx_res->id;
- cmd->body.mobid = dx_query_mob->base.resource->start;
+ cmd->body.mobid = dx_query_mob->tbo.resource->start;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_context_bind_dx_query(ctx_res, dx_query_mob);
@@ -1017,7 +1021,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* after successful submission of the current command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
- struct vmw_buffer_object *new_query_bo,
+ struct vmw_bo *new_query_bo,
struct vmw_sw_context *sw_context)
{
struct vmw_res_cache_entry *ctx_entry =
@@ -1029,24 +1033,24 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
- if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) {
+ if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL;
}
if (unlikely(sw_context->cur_query_bo != NULL)) {
sw_context->needs_post_query_barrier = true;
+ vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
ret = vmw_validation_add_bo(sw_context->ctx,
- sw_context->cur_query_bo,
- dev_priv->has_mob, false);
+ sw_context->cur_query_bo);
if (unlikely(ret != 0))
return ret;
}
sw_context->cur_query_bo = new_query_bo;
+ vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
ret = vmw_validation_add_bo(sw_context->ctx,
- dev_priv->dummy_query_bo,
- dev_priv->has_mob, false);
+ dev_priv->dummy_query_bo);
if (unlikely(ret != 0))
return ret;
}
@@ -1145,9 +1149,9 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAMobId *id,
- struct vmw_buffer_object **vmw_bo_p)
+ struct vmw_bo **vmw_bo_p)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
uint32_t handle = *id;
struct vmw_relocation *reloc;
int ret;
@@ -1158,9 +1162,10 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
return PTR_ERR(vmw_bo);
}
- ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
- ttm_bo_put(&vmw_bo->base);
- drm_gem_object_put(&vmw_bo->base.base);
+ vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
+ ttm_bo_put(&vmw_bo->tbo);
+ drm_gem_object_put(&vmw_bo->tbo.base);
if (unlikely(ret != 0))
return ret;
@@ -1200,9 +1205,9 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGAGuestPtr *ptr,
- struct vmw_buffer_object **vmw_bo_p)
+ struct vmw_bo **vmw_bo_p)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
uint32_t handle = ptr->gmrId;
struct vmw_relocation *reloc;
int ret;
@@ -1213,9 +1218,11 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
return PTR_ERR(vmw_bo);
}
- ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
- ttm_bo_put(&vmw_bo->base);
- drm_gem_object_put(&vmw_bo->base.base);
+ vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
+ ttm_bo_put(&vmw_bo->tbo);
+ drm_gem_object_put(&vmw_bo->tbo.base);
if (unlikely(ret != 0))
return ret;
@@ -1280,7 +1287,7 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
int ret;
cmd = container_of(header, typeof(*cmd), header);
@@ -1363,7 +1370,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
int ret;
@@ -1393,7 +1400,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
int ret;
@@ -1439,7 +1446,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
int ret;
@@ -1467,7 +1474,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
int ret;
@@ -1504,7 +1511,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_buffer_object *vmw_bo = NULL;
+ struct vmw_bo *vmw_bo = NULL;
struct vmw_surface *srf = NULL;
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
int ret;
@@ -1528,7 +1535,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
return ret;
/* Make sure DMA doesn't cross BO boundaries. */
- bo_size = vmw_bo->base.base.size;
+ bo_size = vmw_bo->tbo.base.size;
if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
VMW_DEBUG_USER("Invalid DMA offset.\n");
return -EINVAL;
@@ -1551,7 +1558,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
+ vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);
return 0;
}
@@ -1670,7 +1677,7 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf)
{
- struct vmw_buffer_object *vmw_bo;
+ struct vmw_bo *vmw_bo;
struct {
uint32_t header;
@@ -1701,7 +1708,7 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
struct vmw_resource *res, uint32_t *buf_id,
unsigned long backup_offset)
{
- struct vmw_buffer_object *vbo;
+ struct vmw_bo *vbo;
void *info;
int ret;
@@ -3754,7 +3761,7 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
struct ttm_buffer_object *bo;
list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
- bo = &reloc->vbo->base;
+ bo = &reloc->vbo->tbo;
switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
reloc->location->offset += bo->resource->start << PAGE_SHIFT;
@@ -4364,13 +4371,17 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
if (dev_priv->pinned_bo == NULL)
goto out_unlock;
- ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo, false,
- false);
+ vmw_bo_placement_set(dev_priv->pinned_bo,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
if (ret)
goto out_no_reserve;
- ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo, false,
- false);
+ vmw_bo_placement_set(dev_priv->dummy_query_bo,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
if (ret)
goto out_no_reserve;
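
A pattern repeated throughout this file: the old extra boolean arguments to vmw_validation_add_bo() are gone, and each call site instead sets placement domains on the buffer up front with vmw_bo_placement_set() (or the default-accelerated variant) before adding it. A minimal sketch of the shape of that two-step flow, with illustrative domain bits rather than the driver's definitions:

#include <stdio.h>

#define DOM_SYS		(1u << 0)
#define DOM_VRAM	(1u << 1)
#define DOM_GMR		(1u << 2)
#define DOM_MOB		(1u << 3)

struct model_bo {
	unsigned int domain;		/* preferred placements */
	unsigned int busy_domain;	/* fallback when contended */
};

static void model_placement_set(struct model_bo *bo, unsigned int domain,
				unsigned int busy_domain)
{
	bo->domain = domain;
	bo->busy_domain = busy_domain;
}

/* Validation now reads placement from the buffer itself. */
static int model_validation_add_bo(const struct model_bo *bo)
{
	printf("validate bo: domain=0x%x busy=0x%x\n",
	       bo->domain, bo->busy_domain);
	return 0;
}

int main(void)
{
	struct model_bo query_mob = { 0 };

	/* A DX query MOB is restricted to the MOB domain, as above. */
	model_placement_set(&query_mob, DOM_MOB, DOM_MOB);
	return model_validation_add_bo(&query_mob);
}

Storing the placement on the buffer means later validation passes all see the same domains without each call site re-encoding them as flags.
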
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 66cc35dc223e..2a0cda324703 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index 4d2c28e39f4e..d6baf73a6458 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
- * Copyright 2021 VMware, Inc.
+ * Copyright 2021-2023 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -24,31 +24,17 @@
*
*/
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "drm/drm_prime.h"
#include "drm/drm_gem_ttm_helper.h"
-/**
- * vmw_buffer_object - Convert a struct ttm_buffer_object to a struct
- * vmw_buffer_object.
- *
- * @bo: Pointer to the TTM buffer object.
- * Return: Pointer to the struct vmw_buffer_object embedding the
- * TTM buffer object.
- */
-static struct vmw_buffer_object *
-vmw_buffer_object(struct ttm_buffer_object *bo)
-{
- return container_of(bo, struct vmw_buffer_object, base);
-}
-
static void vmw_gem_object_free(struct drm_gem_object *gobj)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj);
- if (bo) {
+ if (bo)
ttm_bo_put(bo);
- }
}
static int vmw_gem_object_open(struct drm_gem_object *obj,
@@ -65,7 +51,7 @@ static void vmw_gem_object_close(struct drm_gem_object *obj,
static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj);
- struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
+ struct vmw_bo *vbo = to_vmw_bo(obj);
int ret;
ret = ttm_bo_reserve(bo, false, false, NULL);
@@ -103,6 +89,13 @@ static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj)
return drm_prime_pages_to_sg(obj->dev, vmw_tt->dma_ttm.pages, vmw_tt->dma_ttm.num_pages);
}
+static const struct vm_operations_struct vmw_vm_ops = {
+ .pfn_mkwrite = vmw_bo_vm_mkwrite,
+ .page_mkwrite = vmw_bo_vm_mkwrite,
+ .fault = vmw_bo_vm_fault,
+ .open = ttm_bo_vm_open,
+ .close = ttm_bo_vm_close,
+};
static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.free = vmw_gem_object_free,
@@ -115,43 +108,31 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = {
.vmap = drm_gem_ttm_vmap,
.vunmap = drm_gem_ttm_vunmap,
.mmap = drm_gem_ttm_mmap,
+ .vm_ops = &vmw_vm_ops,
};
-/**
- * vmw_gem_destroy - vmw buffer object destructor
- *
- * @bo: Pointer to the embedded struct ttm_buffer_object
- */
-void vmw_gem_destroy(struct ttm_buffer_object *bo)
-{
- struct vmw_buffer_object *vbo = vmw_buffer_object(bo);
-
- WARN_ON(vbo->dirty);
- WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree));
- vmw_bo_unmap(vbo);
- drm_gem_object_release(&vbo->base.base);
- kfree(vbo);
-}
-
int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t size,
uint32_t *handle,
- struct vmw_buffer_object **p_vbo)
+ struct vmw_bo **p_vbo)
{
int ret;
-
- ret = vmw_bo_create(dev_priv, size,
- (dev_priv->has_mob) ?
- &vmw_sys_placement :
- &vmw_vram_sys_placement,
- true, false, &vmw_gem_destroy, p_vbo);
+ struct vmw_bo_params params = {
+ .domain = (dev_priv->has_mob) ? VMW_BO_DOMAIN_SYS : VMW_BO_DOMAIN_VRAM,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
+ .bo_type = ttm_bo_type_device,
+ .size = size,
+ .pin = false
+ };
+
+ ret = vmw_bo_create(dev_priv, &params, p_vbo);
if (ret != 0)
goto out_no_bo;
- (*p_vbo)->base.base.funcs = &vmw_gem_object_funcs;
+ (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs;
- ret = drm_gem_handle_create(filp, &(*p_vbo)->base.base, handle);
+ ret = drm_gem_handle_create(filp, &(*p_vbo)->tbo.base, handle);
out_no_bo:
return ret;
}
@@ -165,7 +146,7 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
(union drm_vmw_alloc_dmabuf_arg *)data;
struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
struct drm_vmw_dmabuf_rep *rep = &arg->rep;
- struct vmw_buffer_object *vbo;
+ struct vmw_bo *vbo;
uint32_t handle;
int ret;
@@ -175,23 +156,23 @@ int vmw_gem_object_create_ioctl(struct drm_device *dev, void *data,
goto out_no_bo;
rep->handle = handle;
- rep->map_handle = drm_vma_node_offset_addr(&vbo->base.base.vma_node);
+ rep->map_handle = drm_vma_node_offset_addr(&vbo->tbo.base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put(&vbo->base.base);
+ drm_gem_object_put(&vbo->tbo.base);
out_no_bo:
return ret;
}
#if defined(CONFIG_DEBUG_FS)
-static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_file *m)
+static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m)
{
const char *placement;
const char *type;
- switch (bo->base.resource->mem_type) {
+ switch (bo->tbo.resource->mem_type) {
case TTM_PL_SYSTEM:
placement = " CPU";
break;
@@ -212,7 +193,7 @@ static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_f
break;
}
- switch (bo->base.type) {
+ switch (bo->tbo.type) {
case ttm_bo_type_device:
type = "device";
break;
@@ -228,12 +209,12 @@ static void vmw_bo_print_info(int id, struct vmw_buffer_object *bo, struct seq_f
}
seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s",
- id, bo->base.base.size, placement, type);
+ id, bo->tbo.base.size, placement, type);
seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d",
- bo->base.priority,
- bo->base.pin_count,
- kref_read(&bo->base.base.refcount),
- kref_read(&bo->base.kref));
+ bo->tbo.priority,
+ bo->tbo.pin_count,
+ kref_read(&bo->tbo.base.refcount),
+ kref_read(&bo->tbo.kref));
seq_puts(m, "\n");
}
@@ -267,7 +248,7 @@ static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused)
spin_lock(&file->table_lock);
idr_for_each_entry(&file->object_idr, gobj, id) {
- struct vmw_buffer_object *bo = gem_to_vmw_bo(gobj);
+ struct vmw_bo *bo = to_vmw_bo(gobj);
vmw_bo_print_info(id, bo, m);
}
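
vmw_gem_object_create_with_handle() above now fills a vmw_bo_params structure instead of threading a long positional argument list into vmw_bo_create(). A small self-contained illustration of the style (the struct and fields below are stand-ins, not the driver's definitions):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum model_domain { DOMAIN_SYS, DOMAIN_VRAM, DOMAIN_MOB };

struct model_bo_params {
	enum model_domain domain;
	enum model_domain busy_domain;
	size_t size;
	bool pin;
};

static int model_bo_create(const struct model_bo_params *params)
{
	printf("create bo: size=%zu domain=%d pin=%d\n",
	       params->size, (int)params->domain, (int)params->pin);
	return 0;
}

int main(void)
{
	bool has_mob = true;
	struct model_bo_params params = {
		.domain = has_mob ? DOMAIN_SYS : DOMAIN_VRAM,
		.busy_domain = DOMAIN_SYS,
		.size = 4096,
		.pin = false,
	};

	return model_bo_create(&params);
}

Designated initializers read as named key/value pairs at the call site, and new parameters can grow the struct without touching every caller.
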
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 445d619e1fdc..84d6380b9895 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -24,8 +24,9 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
-
#include "vmwgfx_kms.h"
+
+#include "vmwgfx_bo.h"
#include "vmw_surface_cache.h"
#include <drm/drm_atomic.h>
@@ -152,9 +153,8 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
SVGAGBCursorHeader *header;
SVGAGBAlphaCursorHeader *alpha_header;
const u32 image_size = width * height * sizeof(*image);
- bool dummy;
- header = ttm_kmap_obj_virtual(&vps->cursor.map, &dummy);
+ header = vmw_bo_map_and_cache(vps->cursor.bo);
alpha_header = &header->header.alphaHeader;
memset(header, 0, sizeof(*header));
@@ -169,7 +169,7 @@ static void vmw_cursor_update_mob(struct vmw_private *dev_priv,
memcpy(header + 1, image, image_size);
vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID,
- vps->cursor.bo->resource->start);
+ vps->cursor.bo->tbo.resource->start);
}
@@ -184,13 +184,13 @@ static u32 vmw_du_cursor_mob_size(u32 w, u32 h)
*/
static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps)
{
- bool dummy;
+ bool is_iomem;
if (vps->surf) {
if (vps->surf_mapped)
- return vmw_bo_map_and_cache(vps->surf->res.backup);
+ return vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
return vps->surf->snooper.image;
} else if (vps->bo)
- return ttm_kmap_obj_virtual(&vps->bo->map, &dummy);
+ return ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem);
return NULL;
}
@@ -222,15 +222,13 @@ static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps,
return changed;
}
-static void vmw_du_destroy_cursor_mob(struct ttm_buffer_object **bo)
+static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo)
{
- if (!(*bo))
+ if (!(*vbo))
return;
- ttm_bo_unpin(*bo);
- ttm_bo_put(*bo);
- kfree(*bo);
- *bo = NULL;
+ ttm_bo_unpin(&(*vbo)->tbo);
+ vmw_bo_unreference(vbo);
}
static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
@@ -254,8 +252,8 @@ static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp,
/* Cache is full: See if this mob is bigger than an existing mob. */
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
- if (vcp->cursor_mobs[i]->base.size <
- vps->cursor.bo->base.size) {
+ if (vcp->cursor_mobs[i]->tbo.base.size <
+ vps->cursor.bo->tbo.base.size) {
vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]);
vcp->cursor_mobs[i] = vps->cursor.bo;
vps->cursor.bo = NULL;
@@ -288,7 +286,7 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
return -EINVAL;
if (vps->cursor.bo) {
- if (vps->cursor.bo->base.size >= size)
+ if (vps->cursor.bo->tbo.base.size >= size)
return 0;
vmw_du_put_cursor_mob(vcp, vps);
}
@@ -296,26 +294,27 @@ static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp,
/* Look for an unused mob in the cache. */
for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) {
if (vcp->cursor_mobs[i] &&
- vcp->cursor_mobs[i]->base.size >= size) {
+ vcp->cursor_mobs[i]->tbo.base.size >= size) {
vps->cursor.bo = vcp->cursor_mobs[i];
vcp->cursor_mobs[i] = NULL;
return 0;
}
}
/* Create a new mob if we can't find an existing one. */
- ret = vmw_bo_create_kernel(dev_priv, size, &vmw_mob_placement,
- &vps->cursor.bo);
+ ret = vmw_bo_create_and_populate(dev_priv, size,
+ VMW_BO_DOMAIN_MOB,
+ &vps->cursor.bo);
if (ret != 0)
return ret;
/* Fence the mob creation so we are guaranteed to have the mob */
- ret = ttm_bo_reserve(vps->cursor.bo, false, false, NULL);
+ ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL);
if (ret != 0)
goto teardown;
- vmw_bo_fence_single(vps->cursor.bo, NULL);
- ttm_bo_unreserve(vps->cursor.bo);
+ vmw_bo_fence_single(&vps->cursor.bo->tbo, NULL);
+ ttm_bo_unreserve(&vps->cursor.bo->tbo);
return 0;
teardown:
@@ -363,7 +362,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
SVGA3dCopyBox *box;
unsigned box_count;
void *virtual;
- bool dummy;
+ bool is_iomem;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
@@ -423,7 +422,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
if (unlikely(ret != 0))
goto err_unreserve;
- virtual = ttm_kmap_obj_virtual(&map, &dummy);
+ virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) {
memcpy(srf->snooper.image, virtual,
@@ -573,39 +572,30 @@ vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps)
{
int ret;
u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h);
- struct ttm_buffer_object *bo = vps->cursor.bo;
+ struct ttm_buffer_object *bo;
- if (!bo)
+ if (!vps->cursor.bo)
return -EINVAL;
+ bo = &vps->cursor.bo->tbo;
+
if (bo->base.size < size)
return -EINVAL;
- if (vps->cursor.mapped)
+ if (vps->cursor.bo->map.virtual)
return 0;
ret = ttm_bo_reserve(bo, false, false, NULL);
-
if (unlikely(ret != 0))
return -ENOMEM;
- ret = ttm_bo_kmap(bo, 0, PFN_UP(size), &vps->cursor.map);
-
- /*
- * We just want to try to get mob bind to finish
- * so that the first write to SVGA_REG_CURSOR_MOBID
- * is done with a buffer that the device has already
- * seen
- */
- (void) ttm_bo_wait(bo, false, false);
+ vmw_bo_map_and_cache(vps->cursor.bo);
ttm_bo_unreserve(bo);
if (unlikely(ret != 0))
return -ENOMEM;
- vps->cursor.mapped = true;
-
return 0;
}
@@ -622,19 +612,15 @@ static int
vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps)
{
int ret = 0;
- struct ttm_buffer_object *bo = vps->cursor.bo;
-
- if (!vps->cursor.mapped)
- return 0;
+ struct vmw_bo *vbo = vps->cursor.bo;
- if (!bo)
+ if (!vbo || !vbo->map.virtual)
return 0;
- ret = ttm_bo_reserve(bo, true, false, NULL);
+ ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL);
if (likely(ret == 0)) {
- ttm_bo_kunmap(&vps->cursor.map);
- ttm_bo_unreserve(bo);
- vps->cursor.mapped = false;
+ vmw_bo_unmap(vbo);
+ ttm_bo_unreserve(&vbo->tbo);
}
return ret;
@@ -657,20 +643,19 @@ vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane,
{
struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane);
struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
- bool dummy;
+ bool is_iomem;
if (vps->surf_mapped) {
- vmw_bo_unmap(vps->surf->res.backup);
+ vmw_bo_unmap(vps->surf->res.guest_memory_bo);
vps->surf_mapped = false;
}
- if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &dummy)) {
- const int ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
+ if (vps->bo && ttm_kmap_obj_virtual(&vps->bo->map, &is_iomem)) {
+ const int ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
if (likely(ret == 0)) {
- if (atomic_read(&vps->bo->base_mapped_count) == 0)
- ttm_bo_kunmap(&vps->bo->map);
- ttm_bo_unreserve(&vps->bo->base);
+ ttm_bo_kunmap(&vps->bo->map);
+ ttm_bo_unreserve(&vps->bo->tbo);
}
}
@@ -736,29 +721,26 @@ vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane,
* reserve the ttm_buffer_object first which
* vmw_bo_map_and_cache() omits.
*/
- ret = ttm_bo_reserve(&vps->bo->base, true, false, NULL);
+ ret = ttm_bo_reserve(&vps->bo->tbo, true, false, NULL);
if (unlikely(ret != 0))
return -ENOMEM;
- ret = ttm_bo_kmap(&vps->bo->base, 0, PFN_UP(size), &vps->bo->map);
-
- if (likely(ret == 0))
- atomic_inc(&vps->bo->base_mapped_count);
+ ret = ttm_bo_kmap(&vps->bo->tbo, 0, PFN_UP(size), &vps->bo->map);
- ttm_bo_unreserve(&vps->bo->base);
+ ttm_bo_unreserve(&vps->bo->tbo);
if (unlikely(ret != 0))
return -ENOMEM;
- } else if (vps->surf && !vps->bo && vps->surf->res.backup) {
+ } else if (vps->surf && !vps->bo && vps->surf->res.guest_memory_bo) {
WARN_ON(vps->surf->snooper.image);
- ret = ttm_bo_reserve(&vps->surf->res.backup->base, true, false,
+ ret = ttm_bo_reserve(&vps->surf->res.guest_memory_bo->tbo, true, false,
NULL);
if (unlikely(ret != 0))
return -ENOMEM;
- vmw_bo_map_and_cache(vps->surf->res.backup);
- ttm_bo_unreserve(&vps->surf->res.backup->base);
+ vmw_bo_map_and_cache(vps->surf->res.guest_memory_bo);
+ ttm_bo_unreserve(&vps->surf->res.guest_memory_bo->tbo);
vps->surf_mapped = true;
}
@@ -785,7 +767,6 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state);
s32 hotspot_x, hotspot_y;
- bool dummy;
hotspot_x = du->hotspot_x;
hotspot_y = du->hotspot_y;
@@ -827,11 +808,6 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane,
hotspot_x, hotspot_y);
}
- if (vps->bo) {
- if (ttm_kmap_obj_virtual(&vps->bo->map, &dummy))
- atomic_dec(&vps->bo->base_mapped_count);
- }
-
du->cursor_x = new_state->crtc_x + du->set_gui_x;
du->cursor_y = new_state->crtc_y + du->set_gui_y;
@@ -935,7 +911,7 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,
WARN_ON(!surface);
if (!surface ||
- (!surface->snooper.image && !surface->res.backup)) {
+ (!surface->snooper.image && !surface->res.guest_memory_bo)) {
DRM_ERROR("surface not suitable for cursor\n");
return -EINVAL;
}
@@ -1279,9 +1255,9 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
user_fence_rep, vclips, num_clips,
NULL);
case vmw_du_screen_target:
- return vmw_kms_stdu_dma(dev_priv, file_priv, vfb,
- user_fence_rep, NULL, vclips, num_clips,
- 1, false, true, NULL);
+ return vmw_kms_stdu_readback(dev_priv, file_priv, vfb,
+ user_fence_rep, NULL, vclips, num_clips,
+ 1, NULL);
default:
WARN_ONCE(true,
"Readback called with invalid display system.\n");
@@ -1406,7 +1382,7 @@ static int vmw_framebuffer_bo_create_handle(struct drm_framebuffer *fb,
struct vmw_framebuffer_bo *vfbd =
vmw_framebuffer_to_vfbd(fb);
- return drm_gem_handle_create(file_priv, &vfbd->buffer->base.base, handle);
+ return drm_gem_handle_create(file_priv, &vfbd->buffer->tbo.base, handle);
}
static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer)
@@ -1486,69 +1462,6 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = {
.dirty = vmw_framebuffer_bo_dirty_ext,
};
-/*
- * Pin the buffer in a location suitable for access by the
- * display system.
- */
-static int vmw_framebuffer_pin(struct vmw_framebuffer *vfb)
-{
- struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_buffer_object *buf;
- struct ttm_placement *placement;
- int ret;
-
- buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
- vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
-
- if (!buf)
- return 0;
-
- switch (dev_priv->active_display_unit) {
- case vmw_du_legacy:
- vmw_overlay_pause_all(dev_priv);
- ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
- vmw_overlay_resume_all(dev_priv);
- break;
- case vmw_du_screen_object:
- case vmw_du_screen_target:
- if (vfb->bo) {
- if (dev_priv->capabilities & SVGA_CAP_3D) {
- /*
- * Use surface DMA to get content to
- * screen target surface.
- */
- placement = &vmw_vram_gmr_placement;
- } else {
- /* Use CPU blit. */
- placement = &vmw_sys_placement;
- }
- } else {
- /* Use surface / image update */
- placement = &vmw_mob_placement;
- }
-
- return vmw_bo_pin_in_placement(dev_priv, buf, placement, false);
- default:
- return -EINVAL;
- }
-
- return ret;
-}
-
-static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
-{
- struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
- struct vmw_buffer_object *buf;
-
- buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
- vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.backup;
-
- if (WARN_ON(!buf))
- return 0;
-
- return vmw_bo_unpin(dev_priv, buf, false);
-}
-
/**
* vmw_create_bo_proxy - create a proxy surface for the buffer object
*
@@ -1566,7 +1479,7 @@ static int vmw_framebuffer_unpin(struct vmw_framebuffer *vfb)
*/
static int vmw_create_bo_proxy(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
- struct vmw_buffer_object *bo_mob,
+ struct vmw_bo *bo_mob,
struct vmw_surface **srf_out)
{
struct vmw_surface_metadata metadata = {0};
@@ -1618,9 +1531,9 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
/* Reserve and switch the backing mob. */
mutex_lock(&res->dev_priv->cmdbuf_mutex);
(void) vmw_resource_reserve(res, false, true);
- vmw_bo_unreference(&res->backup);
- res->backup = vmw_bo_reference(bo_mob);
- res->backup_offset = 0;
+ vmw_bo_unreference(&res->guest_memory_bo);
+ res->guest_memory_bo = vmw_bo_reference(bo_mob);
+ res->guest_memory_offset = 0;
vmw_resource_unreserve(res, false, false, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
@@ -1630,7 +1543,7 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
- struct vmw_buffer_object *bo,
+ struct vmw_bo *bo,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd2
*mode_cmd)
@@ -1642,7 +1555,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
int ret;
requested_size = mode_cmd->height * mode_cmd->pitches[0];
- if (unlikely(requested_size > bo->base.base.size)) {
+ if (unlikely(requested_size > bo->tbo.base.size)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
return -EINVAL;
@@ -1663,7 +1576,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv,
goto out_err1;
}
- vfbd->base.base.obj[0] = &bo->base.base;
+ vfbd->base.base.obj[0] = &bo->tbo.base;
drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd);
vfbd->base.bo = true;
vfbd->buffer = vmw_bo_reference(bo);
@@ -1718,7 +1631,7 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height)
*/
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_buffer_object *bo,
+ struct vmw_bo *bo,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd)
@@ -1765,9 +1678,6 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
if (ret)
return ERR_PTR(ret);
- vfb->pin = vmw_framebuffer_pin;
- vfb->unpin = vmw_framebuffer_unpin;
-
return vfb;
}
@@ -1782,7 +1692,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_framebuffer *vfb = NULL;
struct vmw_surface *surface = NULL;
- struct vmw_buffer_object *bo = NULL;
+ struct vmw_bo *bo = NULL;
int ret;
/* returns either a bo or surface */
@@ -1817,7 +1727,7 @@ err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
if (bo) {
vmw_bo_unreference(&bo);
- drm_gem_object_put(&bo->base.base);
+ drm_gem_object_put(&bo->tbo.base);
}
if (surface)
vmw_surface_unreference(&surface);
@@ -3076,8 +2986,20 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
struct vmw_framebuffer_bo *vfbbo =
container_of(update->vfb, typeof(*vfbbo), base);
- ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer, false,
- update->cpu_blit);
+ /*
+ * For screen targets we want a mappable bo; for everything else we want
+ * an accelerated, i.e. host-backed (vram or gmr), bo. If the display
+ * unit is not a screen target then mobs shouldn't be available.
+ */
+ if (update->dev_priv->active_display_unit == vmw_du_screen_target) {
+ vmw_bo_placement_set(vfbbo->buffer,
+ VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR,
+ VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_GMR);
+ } else {
+ WARN_ON(update->dev_priv->has_mob);
+ vmw_bo_placement_set_default_accelerated(vfbbo->buffer);
+ }
+ ret = vmw_validation_add_bo(&val_ctx, vfbbo->buffer);
} else {
struct vmw_framebuffer_surface *vfbs =
container_of(update->vfb, typeof(*vfbs), base);
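
The cursor hunks above retype the cursor-MOB cache to struct vmw_bo but keep its policy: vmw_du_get_cursor_mob() reuses a cached buffer that is large enough, otherwise allocates a fresh one, and vmw_du_put_cursor_mob() either parks the buffer in a free slot or displaces a smaller cached entry. A rough userspace model of just that policy (allocation, sizes, and names are stand-ins):

#include <stdio.h>
#include <stdlib.h>

#define CACHE_SLOTS 3

struct model_mob {
	size_t size;
};

static struct model_mob *cache[CACHE_SLOTS];

static struct model_mob *mob_get(size_t size)
{
	struct model_mob *mob;
	int i;

	for (i = 0; i < CACHE_SLOTS; i++) {
		if (cache[i] && cache[i]->size >= size) {
			mob = cache[i];	/* reuse a big-enough cached mob */
			cache[i] = NULL;
			return mob;
		}
	}
	mob = malloc(sizeof(*mob));
	if (mob)
		mob->size = size;
	return mob;
}

static void mob_put(struct model_mob *mob)
{
	int i;

	for (i = 0; i < CACHE_SLOTS; i++) {
		if (!cache[i]) {
			cache[i] = mob;	/* free slot: keep it around */
			return;
		}
	}
	for (i = 0; i < CACHE_SLOTS; i++) {
		if (cache[i]->size < mob->size) {
			free(cache[i]);	/* displace a smaller entry */
			cache[i] = mob;
			return;
		}
	}
	free(mob);	/* cache already holds bigger mobs: drop it */
}

int main(void)
{
	struct model_mob *a = mob_get(4096);
	struct model_mob *b;
	int i;

	mob_put(a);
	b = mob_get(1024);	/* reuses the cached 4096-byte mob */
	if (!b)
		return 1;
	printf("got mob of %zu bytes\n", b->size);
	mob_put(b);
	for (i = 0; i < CACHE_SLOTS; i++)
		free(cache[i]);
	return 0;
}

Keeping a few recently sized MOBs around avoids a create-and-fence cycle on every cursor size change.
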
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 4d6e7b555db7..3de7b4b6a230 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**************************************************************************
*
- * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -126,7 +126,6 @@ struct vmw_du_update_plane {
struct vmw_framebuffer *vfb;
struct vmw_fence_obj **out_fence;
struct mutex *mutex;
- bool cpu_blit;
bool intr;
};
@@ -217,8 +216,6 @@ struct vmw_kms_dirty {
*/
struct vmw_framebuffer {
struct drm_framebuffer base;
- int (*pin)(struct vmw_framebuffer *fb);
- int (*unpin)(struct vmw_framebuffer *fb);
bool bo;
uint32_t user_handle;
};
@@ -233,7 +230,7 @@ struct vmw_clip_rect {
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
- struct vmw_buffer_object *buffer;
+ struct vmw_bo *buffer;
struct list_head head;
bool is_bo_proxy; /* true if this is proxy surface for DMA buf */
};
@@ -241,7 +238,7 @@ struct vmw_framebuffer_surface {
struct vmw_framebuffer_bo {
struct vmw_framebuffer base;
- struct vmw_buffer_object *buffer;
+ struct vmw_bo *buffer;
};
@@ -273,9 +270,7 @@ struct vmw_crtc_state {
};
struct vmw_cursor_plane_state {
- struct ttm_buffer_object *bo;
- struct ttm_bo_kmap_obj map;
- bool mapped;
+ struct vmw_bo *bo;
s32 hotspot_x;
s32 hotspot_y;
};
@@ -293,7 +288,7 @@ struct vmw_cursor_plane_state {
struct vmw_plane_state {
struct drm_plane_state base;
struct vmw_surface *surf;
- struct vmw_buffer_object *bo;
+ struct vmw_bo *bo;
int content_fb_type;
unsigned long bo_size;
@@ -346,7 +341,7 @@ struct vmw_connector_state {
struct vmw_cursor_plane {
struct drm_plane base;
- struct ttm_buffer_object *cursor_mobs[3];
+ struct vmw_bo *cursor_mobs[3];
};
/**
@@ -364,7 +359,7 @@ struct vmw_display_unit {
struct vmw_cursor_plane cursor;
struct vmw_surface *cursor_surface;
- struct vmw_buffer_object *cursor_bo;
+ struct vmw_bo *cursor_bo;
size_t cursor_age;
int cursor_x;
@@ -397,7 +392,7 @@ struct vmw_display_unit {
struct vmw_validation_ctx {
struct vmw_resource *res;
- struct vmw_buffer_object *buf;
+ struct vmw_bo *buf;
};
#define vmw_crtc_to_du(x) \
@@ -458,7 +453,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
uint32_t num_clips);
struct vmw_framebuffer *
vmw_kms_new_framebuffer(struct vmw_private *dev_priv,
- struct vmw_buffer_object *bo,
+ struct vmw_bo *bo,
struct vmw_surface *surface,
bool only_2d,
const struct drm_mode_fb_cmd2 *mode_cmd);
@@ -566,17 +561,15 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
unsigned num_clips, int inc,
struct vmw_fence_obj **out_fence,
struct drm_crtc *crtc);
-int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *vfb,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct drm_clip_rect *clips,
- struct drm_vmw_rect *vclips,
- uint32_t num_clips,
- int increment,
- bool to_surface,
- bool interruptible,
- struct drm_crtc *crtc);
+int vmw_kms_stdu_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips,
+ int increment,
+ struct drm_crtc *crtc);
int vmw_du_helper_plane_update(struct vmw_du_update_plane *update);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index a56e5d0ca3c6..c0e42f2ed144 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,11 +25,13 @@
*
**************************************************************************/
+#include "vmwgfx_bo.h"
+#include "vmwgfx_kms.h"
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
-#include "vmwgfx_kms.h"
#define vmw_crtc_to_ldu(x) \
container_of(x, struct vmw_legacy_display_unit, base.crtc)
@@ -134,6 +136,47 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
return 0;
}
+/*
+ * Pin the buffer in a location suitable for access by the
+ * display system.
+ */
+static int vmw_ldu_fb_pin(struct vmw_framebuffer *vfb)
+{
+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+ struct vmw_bo *buf;
+ int ret;
+
+ buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
+
+ if (!buf)
+ return 0;
+ WARN_ON(dev_priv->active_display_unit != vmw_du_legacy);
+
+ if (dev_priv->active_display_unit == vmw_du_legacy) {
+ vmw_overlay_pause_all(dev_priv);
+ ret = vmw_bo_pin_in_start_of_vram(dev_priv, buf, false);
+ vmw_overlay_resume_all(dev_priv);
+ } else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int vmw_ldu_fb_unpin(struct vmw_framebuffer *vfb)
+{
+ struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
+ struct vmw_bo *buf;
+
+ buf = vfb->bo ? vmw_framebuffer_to_vfbd(&vfb->base)->buffer :
+ vmw_framebuffer_to_vfbs(&vfb->base)->surface->res.guest_memory_bo;
+
+ if (WARN_ON(!buf))
+ return 0;
+
+ return vmw_bo_unpin(dev_priv, buf, false);
+}
+
static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
struct vmw_legacy_display_unit *ldu)
{
@@ -145,8 +188,7 @@ static int vmw_ldu_del_active(struct vmw_private *vmw_priv,
list_del_init(&ldu->active);
if (--(ld->num_active) == 0) {
BUG_ON(!ld->fb);
- if (ld->fb->unpin)
- ld->fb->unpin(ld->fb);
+ WARN_ON(vmw_ldu_fb_unpin(ld->fb));
ld->fb = NULL;
}
@@ -163,11 +205,10 @@ static int vmw_ldu_add_active(struct vmw_private *vmw_priv,
BUG_ON(!ld->num_active && ld->fb);
if (vfb != ld->fb) {
- if (ld->fb && ld->fb->unpin)
- ld->fb->unpin(ld->fb);
+ if (ld->fb)
+ WARN_ON(vmw_ldu_fb_unpin(ld->fb));
vmw_svga_enable(vmw_priv);
- if (vfb->pin)
- vfb->pin(vfb);
+ WARN_ON(vmw_ldu_fb_pin(vfb));
ld->fb = vfb;
}
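
The pin helper added above brackets the start-of-VRAM pin between vmw_overlay_pause_all() and vmw_overlay_resume_all(), and resumes regardless of whether the pin succeeded. Reduced to its control flow, with stand-in functions:

#include <stdio.h>

static void overlay_pause_all(void)
{
	puts("overlays paused");
}

static void overlay_resume_all(void)
{
	puts("overlays resumed");
}

static int pin_in_start_of_vram(void)
{
	puts("framebuffer pinned at start of VRAM");
	return 0;
}

/* Pause/resume always brackets the pin, success or failure. */
static int ldu_fb_pin(void)
{
	int ret;

	overlay_pause_all();
	ret = pin_in_start_of_vram();
	overlay_resume_all();
	return ret;
}

int main(void)
{
	return ldu_fb_pin();
}
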
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 0a8cc28d6606..7055cbefc768 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2012-2021 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2012-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,10 +25,11 @@
*
**************************************************************************/
-#include <linux/highmem.h>
-
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
+#include <linux/highmem.h>
+
#ifdef CONFIG_64BIT
#define VMW_PPN_SIZE 8
#define VMW_MOBFMT_PTDEPTH_0 SVGA3D_MOBFMT_PT64_0
@@ -50,7 +51,7 @@
* @pt_root_page DMA address of the level 0 page of the page table.
*/
struct vmw_mob {
- struct ttm_buffer_object *pt_bo;
+ struct vmw_bo *pt_bo;
unsigned long num_pages;
unsigned pt_level;
dma_addr_t pt_root_page;
@@ -203,7 +204,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
if (otable->page_table == NULL)
return;
- bo = otable->page_table->pt_bo;
+ bo = &otable->page_table->pt_bo->tbo;
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
return;
@@ -251,7 +252,9 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
bo_size += otables[i].size;
}
- ret = vmw_bo_create_and_populate(dev_priv, bo_size, &batch->otable_bo);
+ ret = vmw_bo_create_and_populate(dev_priv, bo_size,
+ VMW_BO_DOMAIN_WAITABLE_SYS,
+ &batch->otable_bo);
if (unlikely(ret != 0))
return ret;
@@ -260,7 +263,8 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
if (!batch->otables[i].enabled)
continue;
- ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo,
+ ret = vmw_setup_otable_base(dev_priv, i,
+ &batch->otable_bo->tbo,
offset,
&otables[i]);
if (unlikely(ret != 0))
@@ -277,8 +281,8 @@ out_no_setup:
&batch->otables[i]);
}
- vmw_bo_unpin_unlocked(batch->otable_bo);
- ttm_bo_put(batch->otable_bo);
+ vmw_bo_unpin_unlocked(&batch->otable_bo->tbo);
+ ttm_bo_put(&batch->otable_bo->tbo);
batch->otable_bo = NULL;
return ret;
}
@@ -329,7 +333,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
struct vmw_otable_batch *batch)
{
SVGAOTableType i;
- struct ttm_buffer_object *bo = batch->otable_bo;
+ struct ttm_buffer_object *bo = &batch->otable_bo->tbo;
int ret;
for (i = 0; i < batch->num_otables; ++i)
@@ -344,8 +348,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
ttm_bo_unpin(bo);
ttm_bo_unreserve(bo);
- ttm_bo_put(batch->otable_bo);
- batch->otable_bo = NULL;
+ vmw_bo_unreference(&batch->otable_bo);
}
/*
@@ -413,7 +416,9 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
{
BUG_ON(mob->pt_bo != NULL);
- return vmw_bo_create_and_populate(dev_priv, mob->num_pages * PAGE_SIZE, &mob->pt_bo);
+ return vmw_bo_create_and_populate(dev_priv, mob->num_pages * PAGE_SIZE,
+ VMW_BO_DOMAIN_WAITABLE_SYS,
+ &mob->pt_bo);
}
/**
@@ -494,7 +499,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
unsigned long num_data_pages)
{
unsigned long num_pt_pages = 0;
- struct ttm_buffer_object *bo = mob->pt_bo;
+ struct ttm_buffer_object *bo = &mob->pt_bo->tbo;
struct vmw_piter save_pt_iter = {0};
struct vmw_piter pt_iter;
const struct vmw_sg_table *vsgt;
@@ -531,9 +536,8 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
void vmw_mob_destroy(struct vmw_mob *mob)
{
if (mob->pt_bo) {
- vmw_bo_unpin_unlocked(mob->pt_bo);
- ttm_bo_put(mob->pt_bo);
- mob->pt_bo = NULL;
+ vmw_bo_unpin_unlocked(&mob->pt_bo->tbo);
+ vmw_bo_unreference(&mob->pt_bo);
}
kfree(mob);
}
@@ -552,7 +556,7 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
SVGA3dCmdDestroyGBMob body;
} *cmd;
int ret;
- struct ttm_buffer_object *bo = mob->pt_bo;
+ struct ttm_buffer_object *bo = &mob->pt_bo->tbo;
if (bo) {
ret = ttm_bo_reserve(bo, false, true, NULL);
@@ -644,9 +648,8 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
out_no_cmd_space:
vmw_fifo_resource_dec(dev_priv);
if (pt_set_up) {
- vmw_bo_unpin_unlocked(mob->pt_bo);
- ttm_bo_put(mob->pt_bo);
- mob->pt_bo = NULL;
+ vmw_bo_unpin_unlocked(&mob->pt_bo->tbo);
+ vmw_bo_unreference(&mob->pt_bo);
}
return -ENOMEM;
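
struct vmw_mob above keeps a pt_bo, a pt_level and a root page: the MOB page table is a multi-level table of guest pages in which each level stores VMW_PPN_SIZE-byte page numbers for the level below. A back-of-the-envelope model of how many page-table pages a given number of backing pages needs (4 KiB pages and 8-byte PPNs per the CONFIG_64BIT branch above; the device's real format may differ):

#include <stdio.h>

#define MODEL_PAGE_SIZE	4096UL
#define MODEL_PPN_SIZE	8UL	/* 64-bit page numbers */
#define PTES_PER_PAGE	(MODEL_PAGE_SIZE / MODEL_PPN_SIZE)

/* Pages needed at one level to reference 'entries' pages below it. */
static unsigned long pt_pages_for_level(unsigned long entries)
{
	return (entries + PTES_PER_PAGE - 1) / PTES_PER_PAGE;
}

int main(void)
{
	unsigned long level_pages = 100000;	/* ~390 MiB of data pages */
	unsigned long total = 0;
	int level = 0;

	while (level_pages > 1) {
		level_pages = pt_pages_for_level(level_pages);
		total += level_pages;
		level++;
	}
	printf("levels=%d, page-table pages=%lu\n", level, total);
	return 0;
}

With 512 entries per page, even hundreds of megabytes of backing memory resolve in two levels, which is why a shallow, fixed set of PTDEPTH formats can cover the practical range.
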
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index b5b311f2a91a..8d171d71cb8a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2014 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -24,19 +24,19 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
-
-#include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_bo.h"
+#include "vmwgfx_drv.h"
#include "device_include/svga_overlay.h"
#include "device_include/svga_escape.h"
-#include "vmwgfx_drv.h"
+#include <drm/ttm/ttm_placement.h>
#define VMW_MAX_NUM_STREAMS 1
#define VMW_OVERLAY_CAP_MASK (SVGA_FIFO_CAP_VIDEO | SVGA_FIFO_CAP_ESCAPE)
struct vmw_stream {
- struct vmw_buffer_object *buf;
+ struct vmw_bo *buf;
bool claimed;
bool paused;
struct drm_vmw_control_stream_arg saved;
@@ -92,7 +92,7 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
* -ERESTARTSYS if interrupted by a signal.
*/
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
+ struct vmw_bo *buf,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
@@ -140,7 +140,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
for (i = 0; i < num_items; i++)
items[i].registerId = i;
- vmw_bo_get_guest_ptr(&buf->base, &ptr);
+ vmw_bo_get_guest_ptr(&buf->tbo, &ptr);
ptr.offset += arg->offset;
items[SVGA_VIDEO_ENABLED].value = true;
@@ -223,7 +223,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
* used with GMRs instead of being locked to vram.
*/
static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
+ struct vmw_bo *buf,
bool pin, bool inter)
{
if (!pin)
@@ -295,7 +295,7 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
* -ERESTARTSYS if interrupted.
*/
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buf,
+ struct vmw_bo *buf,
struct drm_vmw_control_stream_arg *arg,
bool interruptible)
{
@@ -433,7 +433,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct vmw_overlay *overlay = dev_priv->overlay_priv;
struct drm_vmw_control_stream_arg *arg =
(struct drm_vmw_control_stream_arg *)data;
- struct vmw_buffer_object *buf;
+ struct vmw_bo *buf;
struct vmw_resource *res;
int ret;
@@ -458,7 +458,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);
vmw_bo_unreference(&buf);
- drm_gem_object_put(&buf->base.base);
+ drm_gem_object_put(&buf->tbo.base);
out_unlock:
mutex_unlock(&overlay->mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index f41f041559f4..74ff2812d66a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2019 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2019-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -24,6 +24,7 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
/*
@@ -78,11 +79,11 @@ struct vmw_bo_dirty {
* dirty structure with the results. This function may change the
* dirty-tracking method.
*/
-static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo)
+static void vmw_bo_dirty_scan_pagetable(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
- struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
+ struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
pgoff_t num_marked;
num_marked = clean_record_shared_mapping_range
@@ -116,26 +117,25 @@ static void vmw_bo_dirty_scan_pagetable(struct vmw_buffer_object *vbo)
*
* This function may change the dirty-tracking method.
*/
-static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo)
+static void vmw_bo_dirty_scan_mkwrite(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
- struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
+ struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
pgoff_t num_marked;
if (dirty->end <= dirty->start)
return;
- num_marked = wp_shared_mapping_range(vbo->base.bdev->dev_mapping,
- dirty->start + offset,
- dirty->end - dirty->start);
+ num_marked = wp_shared_mapping_range(vbo->tbo.bdev->dev_mapping,
+ dirty->start + offset,
+ dirty->end - dirty->start);
if (100UL * num_marked / dirty->bitmap_size >
- VMW_DIRTY_PERCENTAGE) {
+ VMW_DIRTY_PERCENTAGE)
dirty->change_count++;
- } else {
+ else
dirty->change_count = 0;
- }
if (dirty->change_count > VMW_DIRTY_NUM_CHANGE_TRIGGERS) {
pgoff_t start = 0;
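
The hunk above reflows the mkwrite-scan heuristic without changing it: when the share of freshly write-protected pages exceeds VMW_DIRTY_PERCENTAGE on enough consecutive scans, the change counter trips VMW_DIRTY_NUM_CHANGE_TRIGGERS and the tracker switches method. A compact userspace model of that trigger (the threshold values are placeholders, not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define DIRTY_PERCENTAGE	10	/* placeholder threshold */
#define NUM_CHANGE_TRIGGERS	3	/* placeholder trigger count */

struct model_dirty {
	unsigned long bitmap_size;	/* pages under tracking */
	unsigned int change_count;
	bool use_pagetable;
};

static void model_scan(struct model_dirty *d, unsigned long num_marked)
{
	if (100UL * num_marked / d->bitmap_size > DIRTY_PERCENTAGE)
		d->change_count++;
	else
		d->change_count = 0;	/* a quiet scan resets the streak */

	if (d->change_count > NUM_CHANGE_TRIGGERS)
		d->use_pagetable = true;	/* switch tracking method */
}

int main(void)
{
	struct model_dirty d = { .bitmap_size = 1000 };
	int i;

	for (i = 0; i < 5 && !d.use_pagetable; i++)
		model_scan(&d, 500);	/* 50% of pages dirty each scan */
	printf("switched to page-table scanning: %d\n", d.use_pagetable);
	return 0;
}

A consistently hot mapping makes per-page write faults more expensive than simply walking the page tables, which is what the streak counter is detecting.
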
@@ -160,7 +160,7 @@ static void vmw_bo_dirty_scan_mkwrite(struct vmw_buffer_object *vbo)
*
* This function may change the dirty tracking method.
*/
-void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo)
+void vmw_bo_dirty_scan(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
@@ -181,12 +181,12 @@ void vmw_bo_dirty_scan(struct vmw_buffer_object *vbo)
* when calling unmap_mapping_range(). This function makes sure we pick
* up all dirty pages.
*/
-static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo,
+static void vmw_bo_dirty_pre_unmap(struct vmw_bo *vbo,
pgoff_t start, pgoff_t end)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
- struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
+ struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
if (dirty->method != VMW_BO_DIRTY_PAGETABLE || start >= end)
return;
@@ -206,11 +206,11 @@ static void vmw_bo_dirty_pre_unmap(struct vmw_buffer_object *vbo,
*
* This is similar to ttm_bo_unmap_virtual() except it takes a subrange.
*/
-void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
+void vmw_bo_dirty_unmap(struct vmw_bo *vbo,
pgoff_t start, pgoff_t end)
{
- unsigned long offset = drm_vma_node_start(&vbo->base.base.vma_node);
- struct address_space *mapping = vbo->base.bdev->dev_mapping;
+ unsigned long offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
+ struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
vmw_bo_dirty_pre_unmap(vbo, start, end);
unmap_shared_mapping_range(mapping, (offset + start) << PAGE_SHIFT,
@@ -227,10 +227,10 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
*
* Return: Zero on success, -ENOMEM on memory allocation failure.
*/
-int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
+int vmw_bo_dirty_add(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
- pgoff_t num_pages = PFN_UP(vbo->base.resource->size);
+ pgoff_t num_pages = PFN_UP(vbo->tbo.resource->size);
size_t size;
int ret;
@@ -253,8 +253,8 @@ int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
if (num_pages < PAGE_SIZE / sizeof(pte_t)) {
dirty->method = VMW_BO_DIRTY_PAGETABLE;
} else {
- struct address_space *mapping = vbo->base.bdev->dev_mapping;
- pgoff_t offset = drm_vma_node_start(&vbo->base.base.vma_node);
+ struct address_space *mapping = vbo->tbo.bdev->dev_mapping;
+ pgoff_t offset = drm_vma_node_start(&vbo->tbo.base.vma_node);
dirty->method = VMW_BO_DIRTY_MKWRITE;
@@ -284,7 +284,7 @@ out_no_dirty:
*
* Return: Zero on success, -ENOMEM on memory allocation failure.
*/
-void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
+void vmw_bo_dirty_release(struct vmw_bo *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
@@ -306,11 +306,11 @@ void vmw_bo_dirty_release(struct vmw_buffer_object *vbo)
*/
void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
{
- struct vmw_buffer_object *vbo = res->backup;
+ struct vmw_bo *vbo = res->guest_memory_bo;
struct vmw_bo_dirty *dirty = vbo->dirty;
pgoff_t start, cur, end;
- unsigned long res_start = res->backup_offset;
- unsigned long res_end = res->backup_offset + res->backup_size;
+ unsigned long res_start = res->guest_memory_offset;
+ unsigned long res_end = res->guest_memory_offset + res->guest_memory_size;
WARN_ON_ONCE(res_start & ~PAGE_MASK);
res_start >>= PAGE_SHIFT;
@@ -351,9 +351,9 @@ void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res)
*/
void vmw_bo_dirty_clear_res(struct vmw_resource *res)
{
- unsigned long res_start = res->backup_offset;
- unsigned long res_end = res->backup_offset + res->backup_size;
- struct vmw_buffer_object *vbo = res->backup;
+ unsigned long res_start = res->guest_memory_offset;
+ unsigned long res_end = res->guest_memory_offset + res->guest_memory_size;
+ struct vmw_bo *vbo = res->guest_memory_bo;
struct vmw_bo_dirty *dirty = vbo->dirty;
res_start >>= PAGE_SHIFT;
@@ -380,8 +380,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
vm_fault_t ret;
unsigned long page_offset;
unsigned int save_flags;
- struct vmw_buffer_object *vbo =
- container_of(bo, typeof(*vbo), base);
+ struct vmw_bo *vbo = to_vmw_bo(&bo->base);
/*
* mkwrite() doesn't handle the VM_FAULT_RETRY return value correctly.
@@ -419,8 +418,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
vma->vm_private_data;
- struct vmw_buffer_object *vbo =
- container_of(bo, struct vmw_buffer_object, base);
+ struct vmw_bo *vbo = to_vmw_bo(&bo->base);
pgoff_t num_prefault;
pgprot_t prot;
vm_fault_t ret;
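
For reference, the fault handlers above now recover the driver object from the GEM object embedded in the TTM bo, rather than via container_of() on the old ->base member. A minimal sketch of the layout and helper this relies on, assuming struct vmw_bo embeds its TTM object as ->tbo (the struct shown is illustrative, not the driver's complete definition):

	#include <linux/container_of.h>
	#include <drm/drm_gem.h>
	#include <drm/ttm/ttm_bo.h>

	/* Illustrative layout -- not the driver's complete definition. */
	struct vmw_bo {
		struct ttm_buffer_object tbo;	/* was ->base before this series */
		struct vmw_bo_dirty *dirty;	/* dirty-tracking state, if any */
	};

	/* Recover the wrapper from the GEM object embedded inside ->tbo. */
	static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj)
	{
		return container_of(gobj, struct vmw_bo, tbo.base);
	}
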
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c7d645e5ec7b..71eeabf001c8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -27,9 +27,10 @@
#include <drm/ttm/ttm_placement.h>
-#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
#define VMW_RES_EVICT_ERR_COUNT 10
@@ -39,10 +40,10 @@
*/
void vmw_resource_mob_attach(struct vmw_resource *res)
{
- struct vmw_buffer_object *backup = res->backup;
- struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
+ struct vmw_bo *gbo = res->guest_memory_bo;
+ struct rb_node **new = &gbo->res_tree.rb_node, *parent = NULL;
- dma_resv_assert_held(res->backup->base.base.resv);
+ dma_resv_assert_held(gbo->tbo.base.resv);
res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
res->func->prio;
@@ -51,14 +52,14 @@ void vmw_resource_mob_attach(struct vmw_resource *res)
container_of(*new, struct vmw_resource, mob_node);
parent = *new;
- new = (res->backup_offset < this->backup_offset) ?
+ new = (res->guest_memory_offset < this->guest_memory_offset) ?
&((*new)->rb_left) : &((*new)->rb_right);
}
rb_link_node(&res->mob_node, parent, new);
- rb_insert_color(&res->mob_node, &backup->res_tree);
+ rb_insert_color(&res->mob_node, &gbo->res_tree);
- vmw_bo_prio_add(backup, res->used_prio);
+ vmw_bo_prio_add(gbo, res->used_prio);
}
/**
@@ -67,13 +68,13 @@ void vmw_resource_mob_attach(struct vmw_resource *res)
*/
void vmw_resource_mob_detach(struct vmw_resource *res)
{
- struct vmw_buffer_object *backup = res->backup;
+ struct vmw_bo *gbo = res->guest_memory_bo;
- dma_resv_assert_held(backup->base.base.resv);
+ dma_resv_assert_held(gbo->tbo.base.resv);
if (vmw_resource_mob_attached(res)) {
- rb_erase(&res->mob_node, &backup->res_tree);
+ rb_erase(&res->mob_node, &gbo->res_tree);
RB_CLEAR_NODE(&res->mob_node);
- vmw_bo_prio_del(backup, res->used_prio);
+ vmw_bo_prio_del(gbo, res->used_prio);
}
}
@@ -120,8 +121,8 @@ static void vmw_resource_release(struct kref *kref)
spin_lock(&dev_priv->resource_lock);
list_del_init(&res->lru_head);
spin_unlock(&dev_priv->resource_lock);
- if (res->backup) {
- struct ttm_buffer_object *bo = &res->backup->base;
+ if (res->guest_memory_bo) {
+ struct ttm_buffer_object *bo = &res->guest_memory_bo->tbo;
ret = ttm_bo_reserve(bo, false, false, NULL);
BUG_ON(ret);
@@ -133,14 +134,14 @@ static void vmw_resource_release(struct kref *kref)
val_buf.num_shared = 0;
res->func->unbind(res, false, &val_buf);
}
- res->backup_dirty = false;
+ res->guest_memory_dirty = false;
vmw_resource_mob_detach(res);
if (res->dirty)
res->func->dirty_free(res);
if (res->coherent)
- vmw_bo_dirty_release(res->backup);
+ vmw_bo_dirty_release(res->guest_memory_bo);
ttm_bo_unreserve(bo);
- vmw_bo_unreference(&res->backup);
+ vmw_bo_unreference(&res->guest_memory_bo);
}
if (likely(res->hw_destroy != NULL)) {
@@ -223,9 +224,9 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
INIT_LIST_HEAD(&res->lru_head);
INIT_LIST_HEAD(&res->binding_head);
res->id = -1;
- res->backup = NULL;
- res->backup_offset = 0;
- res->backup_dirty = false;
+ res->guest_memory_bo = NULL;
+ res->guest_memory_offset = 0;
+ res->guest_memory_dirty = false;
res->res_dirty = false;
res->coherent = false;
res->used_prio = 3;
@@ -263,7 +264,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
int ret = -EINVAL;
base = ttm_base_object_lookup(tfile, handle);
- if (unlikely(base == NULL))
+ if (unlikely(!base))
return -EINVAL;
if (unlikely(ttm_base_object_type(base) != converter->object_type))
@@ -290,7 +291,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct drm_file *filp,
uint32_t handle,
struct vmw_surface **out_surf,
- struct vmw_buffer_object **out_buf)
+ struct vmw_bo **out_buf)
{
struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
struct vmw_resource *res;
@@ -312,32 +313,36 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
}
/**
- * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
+ * vmw_resource_buf_alloc - Allocate a guest memory buffer for a resource.
*
- * @res: The resource for which to allocate a backup buffer.
+ * @res: The resource for which to allocate a guest memory buffer.
* @interruptible: Whether any sleeps during allocation should be
* performed while interruptible.
*/
static int vmw_resource_buf_alloc(struct vmw_resource *res,
bool interruptible)
{
- unsigned long size = PFN_ALIGN(res->backup_size);
- struct vmw_buffer_object *backup;
+ unsigned long size = PFN_ALIGN(res->guest_memory_size);
+ struct vmw_bo *gbo;
+ struct vmw_bo_params bo_params = {
+ .domain = res->func->domain,
+ .busy_domain = res->func->busy_domain,
+ .bo_type = ttm_bo_type_device,
+ .size = res->guest_memory_size,
+ .pin = false
+ };
int ret;
- if (likely(res->backup)) {
- BUG_ON(res->backup->base.base.size < size);
+ if (likely(res->guest_memory_bo)) {
+ BUG_ON(res->guest_memory_bo->tbo.base.size < size);
return 0;
}
- ret = vmw_bo_create(res->dev_priv, res->backup_size,
- res->func->backup_placement,
- interruptible, false,
- &vmw_bo_bo_free, &backup);
+ ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo);
if (unlikely(ret != 0))
goto out_no_bo;
- res->backup = backup;
+ res->guest_memory_bo = gbo;
out_no_bo:
return ret;
@@ -369,13 +374,13 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
}
if (func->bind &&
- ((func->needs_backup && !vmw_resource_mob_attached(res) &&
- val_buf->bo != NULL) ||
- (!func->needs_backup && val_buf->bo != NULL))) {
+ ((func->needs_guest_memory && !vmw_resource_mob_attached(res) &&
+ val_buf->bo) ||
+ (!func->needs_guest_memory && val_buf->bo))) {
ret = func->bind(res, val_buf);
if (unlikely(ret != 0))
goto out_bind_failed;
- if (func->needs_backup)
+ if (func->needs_guest_memory)
vmw_resource_mob_attach(res);
}
@@ -385,11 +390,11 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
*/
if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
!res->coherent) {
- if (res->backup->dirty && !res->dirty) {
+ if (res->guest_memory_bo->dirty && !res->dirty) {
ret = func->dirty_alloc(res);
if (ret)
return ret;
- } else if (!res->backup->dirty && res->dirty) {
+ } else if (!res->guest_memory_bo->dirty && res->dirty) {
func->dirty_free(res);
}
}
@@ -400,12 +405,12 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
*/
if (res->dirty) {
if (dirtying && !res->res_dirty) {
- pgoff_t start = res->backup_offset >> PAGE_SHIFT;
+ pgoff_t start = res->guest_memory_offset >> PAGE_SHIFT;
pgoff_t end = __KERNEL_DIV_ROUND_UP
- (res->backup_offset + res->backup_size,
+ (res->guest_memory_offset + res->guest_memory_size,
PAGE_SIZE);
- vmw_bo_dirty_unmap(res->backup, start, end);
+ vmw_bo_dirty_unmap(res->guest_memory_bo, start, end);
}
vmw_bo_dirty_transfer_to_res(res);
@@ -427,10 +432,10 @@ out_bind_failed:
* @res: Pointer to the struct vmw_resource to unreserve.
* @dirty_set: Change dirty status of the resource.
* @dirty: When changing dirty status indicates the new status.
- * @switch_backup: Backup buffer has been switched.
- * @new_backup: Pointer to new backup buffer if command submission
+ * @switch_guest_memory: Guest memory buffer has been switched.
+ * @new_guest_memory_bo: Pointer to new guest memory buffer if command submission
* switched. May be NULL.
- * @new_backup_offset: New backup offset if @switch_backup is true.
+ * @new_guest_memory_offset: New guest memory offset if @switch_guest_memory is true.
*
* Currently unreserving a resource means putting it back on the device's
* resource lru list, so that it can be evicted if necessary.
@@ -438,42 +443,42 @@ out_bind_failed:
void vmw_resource_unreserve(struct vmw_resource *res,
bool dirty_set,
bool dirty,
- bool switch_backup,
- struct vmw_buffer_object *new_backup,
- unsigned long new_backup_offset)
+ bool switch_guest_memory,
+ struct vmw_bo *new_guest_memory_bo,
+ unsigned long new_guest_memory_offset)
{
struct vmw_private *dev_priv = res->dev_priv;
if (!list_empty(&res->lru_head))
return;
- if (switch_backup && new_backup != res->backup) {
- if (res->backup) {
+ if (switch_guest_memory && new_guest_memory_bo != res->guest_memory_bo) {
+ if (res->guest_memory_bo) {
vmw_resource_mob_detach(res);
if (res->coherent)
- vmw_bo_dirty_release(res->backup);
- vmw_bo_unreference(&res->backup);
+ vmw_bo_dirty_release(res->guest_memory_bo);
+ vmw_bo_unreference(&res->guest_memory_bo);
}
- if (new_backup) {
- res->backup = vmw_bo_reference(new_backup);
+ if (new_guest_memory_bo) {
+ res->guest_memory_bo = vmw_bo_reference(new_guest_memory_bo);
/*
* The validation code should already have added a
* dirty tracker here.
*/
- WARN_ON(res->coherent && !new_backup->dirty);
+ WARN_ON(res->coherent && !new_guest_memory_bo->dirty);
vmw_resource_mob_attach(res);
} else {
- res->backup = NULL;
+ res->guest_memory_bo = NULL;
}
- } else if (switch_backup && res->coherent) {
- vmw_bo_dirty_release(res->backup);
+ } else if (switch_guest_memory && res->coherent) {
+ vmw_bo_dirty_release(res->guest_memory_bo);
}
- if (switch_backup)
- res->backup_offset = new_backup_offset;
+ if (switch_guest_memory)
+ res->guest_memory_offset = new_guest_memory_offset;
if (dirty_set)
res->res_dirty = dirty;
@@ -507,30 +512,32 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
{
struct ttm_operation_ctx ctx = { true, false };
struct list_head val_list;
- bool backup_dirty = false;
+ bool guest_memory_dirty = false;
int ret;
- if (unlikely(res->backup == NULL)) {
+ if (unlikely(!res->guest_memory_bo)) {
ret = vmw_resource_buf_alloc(res, interruptible);
if (unlikely(ret != 0))
return ret;
}
INIT_LIST_HEAD(&val_list);
- ttm_bo_get(&res->backup->base);
- val_buf->bo = &res->backup->base;
+ ttm_bo_get(&res->guest_memory_bo->tbo);
+ val_buf->bo = &res->guest_memory_bo->tbo;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
- if (res->func->needs_backup && !vmw_resource_mob_attached(res))
+ if (res->func->needs_guest_memory && !vmw_resource_mob_attached(res))
return 0;
- backup_dirty = res->backup_dirty;
- ret = ttm_bo_validate(&res->backup->base,
- res->func->backup_placement,
+ guest_memory_dirty = res->guest_memory_dirty;
+ vmw_bo_placement_set(res->guest_memory_bo, res->func->domain,
+ res->func->busy_domain);
+ ret = ttm_bo_validate(&res->guest_memory_bo->tbo,
+ &res->guest_memory_bo->placement,
&ctx);
if (unlikely(ret != 0))
@@ -543,8 +550,8 @@ out_no_validate:
out_no_reserve:
ttm_bo_put(val_buf->bo);
val_buf->bo = NULL;
- if (backup_dirty)
- vmw_bo_unreference(&res->backup);
+ if (guest_memory_dirty)
+ vmw_bo_unreference(&res->guest_memory_bo);
return ret;
}
@@ -555,12 +562,13 @@ out_no_reserve:
* @res: The resource to reserve.
*
* This function takes the resource off the LRU list and make sure
- * a backup buffer is present for guest-backed resources. However,
- * the buffer may not be bound to the resource at this point.
+ * a guest memory buffer is present for guest-backed resources.
+ * However, the buffer may not be bound to the resource at this
+ * point.
*
*/
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
- bool no_backup)
+ bool no_guest_memory)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
@@ -569,13 +577,13 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
list_del_init(&res->lru_head);
spin_unlock(&dev_priv->resource_lock);
- if (res->func->needs_backup && res->backup == NULL &&
- !no_backup) {
+ if (res->func->needs_guest_memory && !res->guest_memory_bo &&
+ !no_guest_memory) {
ret = vmw_resource_buf_alloc(res, interruptible);
if (unlikely(ret != 0)) {
- DRM_ERROR("Failed to allocate a backup buffer "
+ DRM_ERROR("Failed to allocate a guest memory buffer "
"of size %lu. bytes\n",
- (unsigned long) res->backup_size);
+ (unsigned long) res->guest_memory_size);
return ret;
}
}
@@ -585,10 +593,10 @@ int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
/**
* vmw_resource_backoff_reservation - Unreserve and unreference a
- * backup buffer
+ * guest memory buffer
*.
* @ticket: The ww acquire ctx used for reservation.
- * @val_buf: Backup buffer information.
+ * @val_buf: Guest memory buffer information.
*/
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
@@ -630,14 +638,14 @@ static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
return ret;
if (unlikely(func->unbind != NULL &&
- (!func->needs_backup || vmw_resource_mob_attached(res)))) {
+ (!func->needs_guest_memory || vmw_resource_mob_attached(res)))) {
ret = func->unbind(res, res->res_dirty, &val_buf);
if (unlikely(ret != 0))
goto out_no_unbind;
vmw_resource_mob_detach(res);
}
ret = func->destroy(res);
- res->backup_dirty = true;
+ res->guest_memory_dirty = true;
res->res_dirty = false;
out_no_unbind:
vmw_resource_backoff_reservation(ticket, &val_buf);
@@ -676,8 +684,8 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
val_buf.bo = NULL;
val_buf.num_shared = 0;
- if (res->backup)
- val_buf.bo = &res->backup->base;
+ if (res->guest_memory_bo)
+ val_buf.bo = &res->guest_memory_bo->tbo;
do {
ret = vmw_resource_do_validate(res, &val_buf, dirtying);
if (likely(ret != -EBUSY))
@@ -717,9 +725,9 @@ int vmw_resource_validate(struct vmw_resource *res, bool intr,
if (unlikely(ret != 0))
goto out_no_validate;
- else if (!res->func->needs_backup && res->backup) {
+ else if (!res->func->needs_guest_memory && res->guest_memory_bo) {
WARN_ON_ONCE(vmw_resource_mob_attached(res));
- vmw_bo_unreference(&res->backup);
+ vmw_bo_unreference(&res->guest_memory_bo);
}
return 0;
@@ -740,14 +748,14 @@ out_no_validate:
* validation code, since resource validation and eviction
* both require the backup buffer to be reserved.
*/
-void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
+void vmw_resource_unbind_list(struct vmw_bo *vbo)
{
struct ttm_validate_buffer val_buf = {
- .bo = &vbo->base,
+ .bo = &vbo->tbo,
.num_shared = 0
};
- dma_resv_assert_held(vbo->base.base.resv);
+ dma_resv_assert_held(vbo->tbo.base.resv);
while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
struct rb_node *node = vbo->res_tree.rb_node;
struct vmw_resource *res =
@@ -756,12 +764,12 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
if (!WARN_ON_ONCE(!res->func->unbind))
(void) res->func->unbind(res, res->res_dirty, &val_buf);
- res->backup_dirty = true;
+ res->guest_memory_dirty = true;
res->res_dirty = false;
vmw_resource_mob_detach(res);
}
- (void) ttm_bo_wait(&vbo->base, false, false);
+ (void) ttm_bo_wait(&vbo->tbo, false, false);
}
@@ -773,7 +781,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
* Read back cached states from the device if they exist. This function
* assumes binding_mutex is held.
*/
-int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
+int vmw_query_readback_all(struct vmw_bo *dx_query_mob)
{
struct vmw_resource *dx_query_ctx;
struct vmw_private *dev_priv;
@@ -822,20 +830,19 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
struct ttm_resource *old_mem,
struct ttm_resource *new_mem)
{
- struct vmw_buffer_object *dx_query_mob;
+ struct vmw_bo *dx_query_mob;
struct ttm_device *bdev = bo->bdev;
- struct vmw_private *dev_priv;
-
- dev_priv = container_of(bdev, struct vmw_private, bdev);
+ struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
mutex_lock(&dev_priv->binding_mutex);
/* If BO is being moved from MOB to system memory */
- if (new_mem->mem_type == TTM_PL_SYSTEM &&
+ if (old_mem &&
+ new_mem->mem_type == TTM_PL_SYSTEM &&
old_mem->mem_type == VMW_PL_MOB) {
struct vmw_fence_obj *fence;
- dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
+ dx_query_mob = to_vmw_bo(&bo->base);
if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
mutex_unlock(&dev_priv->binding_mutex);
return;
@@ -863,7 +870,7 @@ void vmw_query_move_notify(struct ttm_buffer_object *bo,
*/
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
- return res->func->needs_backup;
+ return res->func->needs_guest_memory;
}
/**
@@ -959,21 +966,24 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
goto out_no_reserve;
if (res->pin_count == 0) {
- struct vmw_buffer_object *vbo = NULL;
+ struct vmw_bo *vbo = NULL;
- if (res->backup) {
- vbo = res->backup;
+ if (res->guest_memory_bo) {
+ vbo = res->guest_memory_bo;
- ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
+ ret = ttm_bo_reserve(&vbo->tbo, interruptible, false, NULL);
if (ret)
goto out_no_validate;
- if (!vbo->base.pin_count) {
+ if (!vbo->tbo.pin_count) {
+ vmw_bo_placement_set(vbo,
+ res->func->domain,
+ res->func->busy_domain);
ret = ttm_bo_validate
- (&vbo->base,
- res->func->backup_placement,
+ (&vbo->tbo,
+ &vbo->placement,
&ctx);
if (ret) {
- ttm_bo_unreserve(&vbo->base);
+ ttm_bo_unreserve(&vbo->tbo);
goto out_no_validate;
}
}
@@ -983,7 +993,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
}
ret = vmw_resource_validate(res, interruptible, true);
if (vbo)
- ttm_bo_unreserve(&vbo->base);
+ ttm_bo_unreserve(&vbo->tbo);
if (ret)
goto out_no_validate;
}
@@ -1016,12 +1026,12 @@ void vmw_resource_unpin(struct vmw_resource *res)
WARN_ON(ret);
WARN_ON(res->pin_count == 0);
- if (--res->pin_count == 0 && res->backup) {
- struct vmw_buffer_object *vbo = res->backup;
+ if (--res->pin_count == 0 && res->guest_memory_bo) {
+ struct vmw_bo *vbo = res->guest_memory_bo;
- (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
+ (void) ttm_bo_reserve(&vbo->tbo, false, false, NULL);
vmw_bo_pin_reserved(vbo, false);
- ttm_bo_unreserve(&vbo->base);
+ ttm_bo_unreserve(&vbo->tbo);
}
vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
@@ -1062,7 +1072,7 @@ void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
* @num_prefault: Returns how many pages including the first have been
* cleaned and are ok to prefault
*/
-int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
+int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
pgoff_t end, pgoff_t *num_prefault)
{
struct rb_node *cur = vbo->res_tree.rb_node;
@@ -1079,9 +1089,9 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
struct vmw_resource *cur_res =
container_of(cur, struct vmw_resource, mob_node);
- if (cur_res->backup_offset >= res_end) {
+ if (cur_res->guest_memory_offset >= res_end) {
cur = cur->rb_left;
- } else if (cur_res->backup_offset + cur_res->backup_size <=
+ } else if (cur_res->guest_memory_offset + cur_res->guest_memory_size <=
res_start) {
cur = cur->rb_right;
} else {
@@ -1092,7 +1102,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
}
/*
- * In order of increasing backup_offset, clean dirty resources
+ * In order of increasing guest_memory_offset, clean dirty resources
* intersecting the range.
*/
while (found) {
@@ -1108,13 +1118,13 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
found->res_dirty = false;
}
- last_cleaned = found->backup_offset + found->backup_size;
+ last_cleaned = found->guest_memory_offset + found->guest_memory_size;
cur = rb_next(&found->mob_node);
if (!cur)
break;
found = container_of(cur, struct vmw_resource, mob_node);
- if (found->backup_offset >= res_end)
+ if (found->guest_memory_offset >= res_end)
break;
}
@@ -1123,7 +1133,7 @@ int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
*/
*num_prefault = 1;
if (last_cleaned > res_start) {
- struct ttm_buffer_object *bo = &vbo->base;
+ struct ttm_buffer_object *bo = &vbo->tbo;
*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
PAGE_SIZE);
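
All of the vmw_bo_create() call sites in this patch follow the same pattern: the placement pointer and destructor arguments are gone, and the buffer is described by an on-stack struct vmw_bo_params. A condensed sketch of the new calling convention, using only field names that appear in the hunks above (the wrapper function itself is illustrative):

	static int example_create_guest_memory(struct vmw_private *dev_priv,
					       struct vmw_resource *res)
	{
		struct vmw_bo *gbo;
		struct vmw_bo_params bo_params = {
			.domain      = res->func->domain,	/* preferred placement */
			.busy_domain = res->func->busy_domain,	/* fallback under contention */
			.bo_type     = ttm_bo_type_device,
			.size        = res->guest_memory_size,
			.pin         = false
		};
		int ret;

		ret = vmw_bo_create(dev_priv, &bo_params, &gbo);
		if (ret)	/* nothing is returned on failure, so no cleanup here */
			return ret;

		res->guest_memory_bo = gbo;
		return 0;
	}
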
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
index 3b7438b2d289..aa7cbd396bea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
@@ -58,10 +58,11 @@ struct vmw_user_resource_conv {
* struct vmw_res_func - members and functions common for a resource type
*
* @res_type: Enum that identifies the lru list to use for eviction.
- * @needs_backup: Whether the resource is guest-backed and needs
+ * @needs_guest_memory: Whether the resource is guest-backed and needs
* persistent buffer storage.
* @type_name: String that identifies the resource type.
- * @backup_placement: TTM placement for backup buffers.
+ * @domain: TTM placement for guest memory buffers.
+ * @busy_domain: TTM busy placement for guest memory buffers.
* @may_evict Whether the resource may be evicted.
* @create: Create a hardware resource.
* @destroy: Destroy a hardware resource.
@@ -81,9 +82,10 @@ struct vmw_user_resource_conv {
*/
struct vmw_res_func {
enum vmw_res_type res_type;
- bool needs_backup;
+ bool needs_guest_memory;
const char *type_name;
- struct ttm_placement *backup_placement;
+ u32 domain;
+ u32 busy_domain;
bool may_evict;
u32 prio;
u32 dirty_prio;
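
Since @domain and @busy_domain are plain masks rather than a struct ttm_placement pointer, validation sites now derive the TTM placement on demand. A sketch of how the two fields are consumed, following vmw_resource_check_buffer() above; it assumes VMW_BO_DOMAIN_* are bit flags that vmw_bo_placement_set() translates into the placement cached in the bo:

	static int example_validate_guest_memory(struct vmw_resource *res,
						 struct ttm_operation_ctx *ctx)
	{
		struct vmw_bo *gbo = res->guest_memory_bo;

		/* Build gbo->placement from the resource type's domain masks. */
		vmw_bo_placement_set(gbo, res->func->domain, res->func->busy_domain);

		/* Validate against the placement we just derived. */
		return ttm_bo_validate(&gbo->tbo, &gbo->placement, ctx);
	}
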
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index e1f36a09c59c..556a403b7eb5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2011-2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,13 +25,14 @@
*
**************************************************************************/
+#include "vmwgfx_bo.h"
+#include "vmwgfx_kms.h"
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include "vmwgfx_kms.h"
-
#define vmw_crtc_to_sou(x) \
container_of(x, struct vmw_screen_object_unit, base.crtc)
#define vmw_encoder_to_sou(x) \
@@ -89,7 +90,7 @@ struct vmw_screen_object_unit {
struct vmw_display_unit base;
unsigned long buffer_size; /**< Size of allocated buffer */
- struct vmw_buffer_object *buffer; /**< Backing store buffer */
+ struct vmw_bo *buffer; /**< Backing store buffer */
bool defined;
};
@@ -148,7 +149,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
sou->base.set_gui_y = cmd->obj.root.y;
/* Ok to assume that buffer is pinned in vram */
- vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
+ vmw_bo_get_guest_ptr(&sou->buffer->tbo, &cmd->obj.backingStore.ptr);
cmd->obj.backingStore.pitch = mode->hdisplay * 4;
vmw_cmd_commit(dev_priv, fifo_size);
@@ -409,9 +410,13 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc;
struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
struct vmw_private *dev_priv;
- size_t size;
int ret;
-
+ struct vmw_bo_params bo_params = {
+ .domain = VMW_BO_DOMAIN_VRAM,
+ .busy_domain = VMW_BO_DOMAIN_VRAM,
+ .bo_type = ttm_bo_type_device,
+ .pin = true
+ };
if (!new_fb) {
vmw_bo_unreference(&vps->bo);
@@ -420,11 +425,11 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
return 0;
}
- size = new_state->crtc_w * new_state->crtc_h * 4;
+ bo_params.size = new_state->crtc_w * new_state->crtc_h * 4;
dev_priv = vmw_priv(crtc->dev);
if (vps->bo) {
- if (vps->bo_size == size) {
+ if (vps->bo_size == bo_params.size) {
/*
* Note that this might temporarily up the pin-count
* to 2, until cleanup_fb() is called.
@@ -443,16 +448,12 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
* resume the overlays, this is preferred to failing to alloc.
*/
vmw_overlay_pause_all(dev_priv);
- ret = vmw_bo_create(dev_priv, size,
- &vmw_vram_placement,
- false, true, &vmw_bo_bo_free, &vps->bo);
+ ret = vmw_bo_create(dev_priv, &bo_params, &vps->bo);
vmw_overlay_resume_all(dev_priv);
- if (ret) {
- vps->bo = NULL; /* vmw_bo_init frees on error */
+ if (ret)
return ret;
- }
- vps->bo_size = size;
+ vps->bo_size = bo_params.size;
/*
* TTM already thinks the buffer is pinned, but make sure the
@@ -489,7 +490,7 @@ static uint32_t vmw_sou_bo_define_gmrfb(struct vmw_du_update_plane *update,
gmr->body.format.colorDepth = depth;
gmr->body.format.reserved = 0;
gmr->body.bytesPerLine = update->vfb->base.pitches[0];
- vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &gmr->body.ptr);
+ vmw_bo_get_guest_ptr(&vfbbo->buffer->tbo, &gmr->body.ptr);
return sizeof(*gmr);
}
@@ -546,7 +547,6 @@ static int vmw_sou_plane_update_bo(struct vmw_private *dev_priv,
bo_update.base.vfb = vfb;
bo_update.base.out_fence = out_fence;
bo_update.base.mutex = NULL;
- bo_update.base.cpu_blit = false;
bo_update.base.intr = true;
bo_update.base.calc_fifo_size = vmw_sou_bo_fifo_size;
@@ -707,7 +707,6 @@ static int vmw_sou_plane_update_surface(struct vmw_private *dev_priv,
srf_update.base.vfb = vfb;
srf_update.base.out_fence = out_fence;
srf_update.base.mutex = &dev_priv->cmdbuf_mutex;
- srf_update.base.cpu_blit = false;
srf_update.base.intr = true;
srf_update.base.calc_fifo_size = vmw_sou_surface_fifo_size;
@@ -947,7 +946,7 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv)
static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
struct vmw_framebuffer *framebuffer)
{
- struct vmw_buffer_object *buf =
+ struct vmw_bo *buf =
container_of(framebuffer, struct vmw_framebuffer_bo,
base)->buffer;
int depth = framebuffer->base.format->depth;
@@ -973,7 +972,7 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
cmd->body.format.reserved = 0;
cmd->body.bytesPerLine = framebuffer->base.pitches[0];
/* Buffer is reserved in vram or GMR */
- vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr);
+ vmw_bo_get_guest_ptr(&buf->tbo, &cmd->body.ptr);
vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -1216,14 +1215,16 @@ int vmw_kms_sou_do_bo_dirty(struct vmw_private *dev_priv,
struct vmw_fence_obj **out_fence,
struct drm_crtc *crtc)
{
- struct vmw_buffer_object *buf =
+ struct vmw_bo *buf =
container_of(framebuffer, struct vmw_framebuffer_bo,
base)->buffer;
struct vmw_kms_dirty dirty;
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret;
- ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
+ vmw_bo_placement_set(buf, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ ret = vmw_validation_add_bo(&val_ctx, buf);
if (ret)
return ret;
@@ -1323,13 +1324,15 @@ int vmw_kms_sou_readback(struct vmw_private *dev_priv,
uint32_t num_clips,
struct drm_crtc *crtc)
{
- struct vmw_buffer_object *buf =
+ struct vmw_bo *buf =
container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_kms_dirty dirty;
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
int ret;
- ret = vmw_validation_add_bo(&val_ctx, buf, false, false);
+ vmw_bo_placement_set(buf, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
+ VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
+ ret = vmw_validation_add_bo(&val_ctx, buf);
if (ret)
return ret;
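
vmw_validation_add_bo() has lost its interruptible/cpu_blit booleans, so the dirty and readback paths above reduce to the same three steps. An illustrative sketch of that flow, with error handling condensed (the helper name is hypothetical; the calls keep the signatures used in the hunks):

	static int example_validate_fb_bo(struct vmw_bo *buf)
	{
		DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
		int ret;

		/* 1. Choose the domains the bo may occupy for this operation. */
		vmw_bo_placement_set(buf, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
				     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);

		/* 2. Queue the bo on the validation context. */
		ret = vmw_validation_add_bo(&val_ctx, buf);
		if (ret)
			return ret;

		/* 3. Reserve and validate everything queued so far. */
		ret = vmw_validation_prepare(&val_ctx, NULL, true);
		if (ret)
			vmw_validation_unref_lists(&val_ctx);
		return ret;
	}
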
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 51e83dfa1cac..e7226db8b242 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -27,9 +27,10 @@
#include <drm/ttm/ttm_placement.h>
+#include "vmwgfx_binding.h"
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
-#include "vmwgfx_binding.h"
struct vmw_shader {
struct vmw_resource res;
@@ -88,12 +89,13 @@ const struct vmw_user_resource_conv *user_shader_converter =
static const struct vmw_res_func vmw_gb_shader_func = {
.res_type = vmw_res_shader,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "guest backed shaders",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_gb_shader_create,
.destroy = vmw_gb_shader_destroy,
.bind = vmw_gb_shader_bind,
@@ -102,12 +104,13 @@ static const struct vmw_res_func vmw_gb_shader_func = {
static const struct vmw_res_func vmw_dx_shader_func = {
.res_type = vmw_res_shader,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = true,
.prio = 3,
.dirty_prio = 3,
.type_name = "dx shaders",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_dx_shader_create,
/*
* The destroy callback is only called with a committed resource on
@@ -158,7 +161,7 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
SVGA3dShaderType type,
uint8_t num_input_sig,
uint8_t num_output_sig,
- struct vmw_buffer_object *byte_code,
+ struct vmw_bo *byte_code,
void (*res_free) (struct vmw_resource *res))
{
struct vmw_shader *shader = vmw_res_to_shader(res);
@@ -175,10 +178,10 @@ static int vmw_gb_shader_init(struct vmw_private *dev_priv,
return ret;
}
- res->backup_size = size;
+ res->guest_memory_size = size;
if (byte_code) {
- res->backup = vmw_bo_reference(byte_code);
- res->backup_offset = offset;
+ res->guest_memory_bo = vmw_bo_reference(byte_code);
+ res->guest_memory_offset = offset;
}
shader->size = size;
shader->type = type;
@@ -259,8 +262,8 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
cmd->body.mobid = bo->resource->start;
- cmd->body.offsetInBytes = res->backup_offset;
- res->backup_dirty = false;
+ cmd->body.offsetInBytes = res->guest_memory_offset;
+ res->guest_memory_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -277,7 +280,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
} *cmd;
struct vmw_fence_obj *fence;
- BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB);
+ BUG_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB);
cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))
@@ -397,8 +400,8 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = shader->ctx->id;
cmd->body.shid = shader->id;
- cmd->body.mobid = res->backup->base.resource->start;
- cmd->body.offsetInBytes = res->backup_offset;
+ cmd->body.mobid = res->guest_memory_bo->tbo.resource->start;
+ cmd->body.offsetInBytes = res->guest_memory_offset;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
vmw_cotable_add_resource(shader->cotable, &shader->cotable_head);
@@ -508,7 +511,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res,
struct vmw_fence_obj *fence;
int ret;
- BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB);
+ BUG_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
ret = vmw_dx_shader_scrub(res);
@@ -680,7 +683,7 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
}
static int vmw_user_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buffer,
+ struct vmw_bo *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type,
@@ -734,7 +737,7 @@ out:
static struct vmw_resource *vmw_shader_alloc(struct vmw_private *dev_priv,
- struct vmw_buffer_object *buffer,
+ struct vmw_bo *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type)
@@ -771,7 +774,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_buffer_object *buffer = NULL;
+ struct vmw_bo *buffer = NULL;
SVGA3dShaderType shader_type;
int ret;
@@ -782,7 +785,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
return ret;
}
- if ((u64)buffer->base.base.size < (u64)size + (u64)offset) {
+ if ((u64)buffer->tbo.base.size < (u64)size + (u64)offset) {
VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
ret = -EINVAL;
goto out_bad_arg;
@@ -807,7 +810,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
num_output_sig, tfile, shader_handle);
out_bad_arg:
vmw_bo_unreference(&buffer);
- drm_gem_object_put(&buffer->base.base);
+ drm_gem_object_put(&buffer->tbo.base);
return ret;
}
@@ -884,28 +887,34 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
struct list_head *list)
{
struct ttm_operation_ctx ctx = { false, true };
- struct vmw_buffer_object *buf;
+ struct vmw_bo *buf;
struct ttm_bo_kmap_obj map;
bool is_iomem;
int ret;
struct vmw_resource *res;
+ struct vmw_bo_params bo_params = {
+ .domain = VMW_BO_DOMAIN_SYS,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
+ .bo_type = ttm_bo_type_device,
+ .size = size,
+ .pin = true
+ };
if (!vmw_shader_id_ok(user_key, shader_type))
return -EINVAL;
- ret = vmw_bo_create(dev_priv, size, &vmw_sys_placement,
- true, true, vmw_bo_bo_free, &buf);
+ ret = vmw_bo_create(dev_priv, &bo_params, &buf);
if (unlikely(ret != 0))
goto out;
- ret = ttm_bo_reserve(&buf->base, false, true, NULL);
+ ret = ttm_bo_reserve(&buf->tbo, false, true, NULL);
if (unlikely(ret != 0))
goto no_reserve;
/* Map and copy shader bytecode. */
- ret = ttm_bo_kmap(&buf->base, 0, PFN_UP(size), &map);
+ ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
if (unlikely(ret != 0)) {
- ttm_bo_unreserve(&buf->base);
+ ttm_bo_unreserve(&buf->tbo);
goto no_reserve;
}
@@ -913,9 +922,9 @@ int vmw_compat_shader_add(struct vmw_private *dev_priv,
WARN_ON(is_iomem);
ttm_bo_kunmap(&map);
- ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, &ctx);
+ ret = ttm_bo_validate(&buf->tbo, &buf->placement, &ctx);
WARN_ON(ret != 0);
- ttm_bo_unreserve(&buf->base);
+ ttm_bo_unreserve(&buf->tbo);
res = vmw_shader_alloc(dev_priv, buf, size, 0, shader_type);
if (unlikely(ret != 0))
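
The compat-shader path still uploads bytecode through a kernel mapping; only the handles changed (&buf->base becomes &buf->tbo, and validation uses the placement cached in the bo). A condensed, illustrative sketch of that reserve/kmap/copy/validate sequence:

	static int example_upload_bytecode(struct vmw_bo *buf,
					   const void *bytecode, size_t size)
	{
		struct ttm_operation_ctx ctx = { false, true };
		struct ttm_bo_kmap_obj map;
		bool is_iomem;
		int ret;

		ret = ttm_bo_reserve(&buf->tbo, false, true, NULL);
		if (ret)
			return ret;

		ret = ttm_bo_kmap(&buf->tbo, 0, PFN_UP(size), &map);
		if (ret)
			goto out_unreserve;

		memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
		ttm_bo_kunmap(&map);

		/* Re-validate against the placement already stored in the bo. */
		ret = ttm_bo_validate(&buf->tbo, &buf->placement, &ctx);

	out_unreserve:
		ttm_bo_unreserve(&buf->tbo);
		return ret;
	}
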
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 4ea32b01efc0..5af4db6d1f18 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -24,6 +24,7 @@
*
**************************************************************************/
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
@@ -81,10 +82,11 @@ static void vmw_view_commit_notify(struct vmw_resource *res,
static const struct vmw_res_func vmw_view_func = {
.res_type = vmw_res_view,
- .needs_backup = false,
+ .needs_guest_memory = false,
.may_evict = false,
.type_name = "DX view",
- .backup_placement = NULL,
+ .domain = VMW_BO_DOMAIN_SYS,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
.create = vmw_view_create,
.commit_notify = vmw_view_commit_notify,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 0090abe89254..d79a6eccfaa4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/******************************************************************************
*
- * COPYRIGHT (C) 2014-2022 VMware, Inc., Palo Alto, CA., USA
+ * COPYRIGHT (C) 2014-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,14 +25,15 @@
*
******************************************************************************/
+#include "vmwgfx_bo.h"
+#include "vmwgfx_kms.h"
+#include "vmw_surface_cache.h"
+
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fourcc.h>
-#include "vmwgfx_kms.h"
-#include "vmw_surface_cache.h"
-
#define vmw_crtc_to_stdu(x) \
container_of(x, struct vmw_screen_target_display_unit, base.crtc)
#define vmw_encoder_to_stdu(x) \
@@ -65,12 +66,11 @@ enum stdu_content_type {
*/
struct vmw_stdu_dirty {
struct vmw_kms_dirty base;
- SVGA3dTransferType transfer;
s32 left, right, top, bottom;
s32 fb_left, fb_top;
u32 pitch;
union {
- struct vmw_buffer_object *buf;
+ struct vmw_bo *buf;
u32 sid;
};
};
@@ -136,12 +136,6 @@ static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu);
* Screen Target Display Unit CRTC Functions
*****************************************************************************/
-static bool vmw_stdu_use_cpu_blit(const struct vmw_private *vmw)
-{
- return !(vmw->capabilities & SVGA_CAP_3D) || vmw->vram_size < (32 * 1024 * 1024);
-}
-
-
/**
* vmw_stdu_crtc_destroy - cleans up the STDU
*
@@ -451,93 +445,6 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc,
}
/**
- * vmw_stdu_bo_clip - Callback to encode a suface DMA command cliprect
- *
- * @dirty: The closure structure.
- *
- * Encodes a surface DMA command cliprect and updates the bounding box
- * for the DMA.
- */
-static void vmw_stdu_bo_clip(struct vmw_kms_dirty *dirty)
-{
- struct vmw_stdu_dirty *ddirty =
- container_of(dirty, struct vmw_stdu_dirty, base);
- struct vmw_stdu_dma *cmd = dirty->cmd;
- struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
-
- blit += dirty->num_hits;
- blit->srcx = dirty->fb_x;
- blit->srcy = dirty->fb_y;
- blit->x = dirty->unit_x1;
- blit->y = dirty->unit_y1;
- blit->d = 1;
- blit->w = dirty->unit_x2 - dirty->unit_x1;
- blit->h = dirty->unit_y2 - dirty->unit_y1;
- dirty->num_hits++;
-
- if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM)
- return;
-
- /* Destination bounding box */
- ddirty->left = min_t(s32, ddirty->left, dirty->unit_x1);
- ddirty->top = min_t(s32, ddirty->top, dirty->unit_y1);
- ddirty->right = max_t(s32, ddirty->right, dirty->unit_x2);
- ddirty->bottom = max_t(s32, ddirty->bottom, dirty->unit_y2);
-}
-
-/**
- * vmw_stdu_bo_fifo_commit - Callback to fill in and submit a DMA command.
- *
- * @dirty: The closure structure.
- *
- * Fills in the missing fields in a DMA command, and optionally encodes
- * a screen target update command, depending on transfer direction.
- */
-static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
-{
- struct vmw_stdu_dirty *ddirty =
- container_of(dirty, struct vmw_stdu_dirty, base);
- struct vmw_screen_target_display_unit *stdu =
- container_of(dirty->unit, typeof(*stdu), base);
- struct vmw_stdu_dma *cmd = dirty->cmd;
- struct SVGA3dCopyBox *blit = (struct SVGA3dCopyBox *) &cmd[1];
- SVGA3dCmdSurfaceDMASuffix *suffix =
- (SVGA3dCmdSurfaceDMASuffix *) &blit[dirty->num_hits];
- size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix);
-
- if (!dirty->num_hits) {
- vmw_cmd_commit(dirty->dev_priv, 0);
- return;
- }
-
- cmd->header.id = SVGA_3D_CMD_SURFACE_DMA;
- cmd->header.size = sizeof(cmd->body) + blit_size;
- vmw_bo_get_guest_ptr(&ddirty->buf->base, &cmd->body.guest.ptr);
- cmd->body.guest.pitch = ddirty->pitch;
- cmd->body.host.sid = stdu->display_srf->res.id;
- cmd->body.host.face = 0;
- cmd->body.host.mipmap = 0;
- cmd->body.transfer = ddirty->transfer;
- suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = ddirty->buf->base.base.size;
-
- if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) {
- blit_size += sizeof(struct vmw_stdu_update);
-
- vmw_stdu_populate_update(&suffix[1], stdu->base.unit,
- ddirty->left, ddirty->right,
- ddirty->top, ddirty->bottom);
- }
-
- vmw_cmd_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
-
- stdu->display_srf->res.res_dirty = true;
- ddirty->left = ddirty->top = S32_MAX;
- ddirty->right = ddirty->bottom = S32_MIN;
-}
-
-
-/**
* vmw_stdu_bo_cpu_clip - Callback to encode a CPU blit
*
* @dirty: The closure structure.
@@ -597,62 +504,21 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
return;
/* Assume we are blitting from Guest (bo) to Host (display_srf) */
- dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
- dst_bo = &stdu->display_srf->res.backup->base;
- dst_offset = ddirty->top * dst_pitch + ddirty->left * stdu->cpp;
-
- src_pitch = ddirty->pitch;
- src_bo = &ddirty->buf->base;
- src_offset = ddirty->fb_top * src_pitch + ddirty->fb_left * stdu->cpp;
-
- /* Swap src and dst if the assumption was wrong. */
- if (ddirty->transfer != SVGA3D_WRITE_HOST_VRAM) {
- swap(dst_pitch, src_pitch);
- swap(dst_bo, src_bo);
- swap(src_offset, dst_offset);
- }
+ src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
+ src_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
+ src_offset = ddirty->top * src_pitch + ddirty->left * stdu->cpp;
+
+ dst_pitch = ddirty->pitch;
+ dst_bo = &ddirty->buf->tbo;
+ dst_offset = ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
(void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
src_bo, src_offset, src_pitch,
width * stdu->cpp, height, &diff);
-
- if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM &&
- drm_rect_visible(&diff.rect)) {
- struct vmw_private *dev_priv;
- struct vmw_stdu_update *cmd;
- struct drm_clip_rect region;
- int ret;
-
- /* We are updating the actual surface, not a proxy */
- region.x1 = diff.rect.x1;
- region.x2 = diff.rect.x2;
- region.y1 = diff.rect.y1;
- region.y2 = diff.rect.y2;
- ret = vmw_kms_update_proxy(&stdu->display_srf->res, &region,
- 1, 1);
- if (ret)
- goto out_cleanup;
-
-
- dev_priv = vmw_priv(stdu->base.crtc.dev);
- cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
- if (!cmd)
- goto out_cleanup;
-
- vmw_stdu_populate_update(cmd, stdu->base.unit,
- region.x1, region.x2,
- region.y1, region.y2);
-
- vmw_cmd_commit(dev_priv, sizeof(*cmd));
- }
-
-out_cleanup:
- ddirty->left = ddirty->top = ddirty->fb_left = ddirty->fb_top = S32_MAX;
- ddirty->right = ddirty->bottom = S32_MIN;
}
/**
- * vmw_kms_stdu_dma - Perform a DMA transfer between a buffer-object backed
+ * vmw_kms_stdu_readback - Perform a readback between a buffer-object backed
* framebuffer and the screen target system.
*
* @dev_priv: Pointer to the device private structure.
@@ -665,9 +531,6 @@ out_cleanup:
* be NULL.
* @num_clips: Number of clip rects in @clips or @vclips.
* @increment: Increment to use when looping over @clips or @vclips.
- * @to_surface: Whether to DMA to the screen target system as opposed to
- * from the screen target system.
- * @interruptible: Whether to perform waits interruptible if possible.
* @crtc: If crtc is passed, perform stdu dma on that crtc only.
*
* If DMA-ing till the screen target system, the function will also notify
@@ -676,59 +539,49 @@ out_cleanup:
* Returns 0 on success, negative error code on failure. -ERESTARTSYS if
* interrupted.
*/
-int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
- struct drm_file *file_priv,
- struct vmw_framebuffer *vfb,
- struct drm_vmw_fence_rep __user *user_fence_rep,
- struct drm_clip_rect *clips,
- struct drm_vmw_rect *vclips,
- uint32_t num_clips,
- int increment,
- bool to_surface,
- bool interruptible,
- struct drm_crtc *crtc)
+int vmw_kms_stdu_readback(struct vmw_private *dev_priv,
+ struct drm_file *file_priv,
+ struct vmw_framebuffer *vfb,
+ struct drm_vmw_fence_rep __user *user_fence_rep,
+ struct drm_clip_rect *clips,
+ struct drm_vmw_rect *vclips,
+ uint32_t num_clips,
+ int increment,
+ struct drm_crtc *crtc)
{
- struct vmw_buffer_object *buf =
+ struct vmw_bo *buf =
container_of(vfb, struct vmw_framebuffer_bo, base)->buffer;
struct vmw_stdu_dirty ddirty;
int ret;
- bool cpu_blit = vmw_stdu_use_cpu_blit(dev_priv);
DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);
/*
- * VMs without 3D support don't have the surface DMA command and
- * we'll be using a CPU blit, and the framebuffer should be moved out
- * of VRAM.
+ * The GMR domain might look out of place here, since it should never be
+ * needed for screen targets, but e.g. the xorg vmware driver issues
+ * CMD_SURFACE_DMA for various pixmap updates, which can transition our bo
+ * to a GMR. Instead of forcing another transition we can optimize the
+ * readback by reading directly from the GMR.
*/
- ret = vmw_validation_add_bo(&val_ctx, buf, false, cpu_blit);
+ vmw_bo_placement_set(buf,
+ VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_GMR,
+ VMW_BO_DOMAIN_MOB | VMW_BO_DOMAIN_SYS | VMW_BO_DOMAIN_GMR);
+ ret = vmw_validation_add_bo(&val_ctx, buf);
if (ret)
return ret;
- ret = vmw_validation_prepare(&val_ctx, NULL, interruptible);
+ ret = vmw_validation_prepare(&val_ctx, NULL, true);
if (ret)
goto out_unref;
- ddirty.transfer = (to_surface) ? SVGA3D_WRITE_HOST_VRAM :
- SVGA3D_READ_HOST_VRAM;
ddirty.left = ddirty.top = S32_MAX;
ddirty.right = ddirty.bottom = S32_MIN;
ddirty.fb_left = ddirty.fb_top = S32_MAX;
ddirty.pitch = vfb->base.pitches[0];
ddirty.buf = buf;
- ddirty.base.fifo_commit = vmw_stdu_bo_fifo_commit;
- ddirty.base.clip = vmw_stdu_bo_clip;
- ddirty.base.fifo_reserve_size = sizeof(struct vmw_stdu_dma) +
- num_clips * sizeof(SVGA3dCopyBox) +
- sizeof(SVGA3dCmdSurfaceDMASuffix);
- if (to_surface)
- ddirty.base.fifo_reserve_size += sizeof(struct vmw_stdu_update);
-
-
- if (cpu_blit) {
- ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit;
- ddirty.base.clip = vmw_stdu_bo_cpu_clip;
- ddirty.base.fifo_reserve_size = 0;
- }
+
+ ddirty.base.fifo_commit = vmw_stdu_bo_cpu_commit;
+ ddirty.base.clip = vmw_stdu_bo_cpu_clip;
+ ddirty.base.fifo_reserve_size = 0;
ddirty.base.crtc = crtc;
@@ -1160,11 +1013,8 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
/*
* This should only happen if the buffer object is too large to create a
* proxy surface for.
- * If we are a 2D VM with a buffer object then we have to use CPU blit
- * so cache these mappings
*/
- if (vps->content_fb_type == SEPARATE_BO &&
- vmw_stdu_use_cpu_blit(dev_priv))
+ if (vps->content_fb_type == SEPARATE_BO)
vps->cpp = new_fb->pitches[0] / new_fb->width;
return 0;
@@ -1174,14 +1024,6 @@ out_srf_unref:
return ret;
}
-static uint32_t vmw_stdu_bo_fifo_size(struct vmw_du_update_plane *update,
- uint32_t num_hits)
-{
- return sizeof(struct vmw_stdu_dma) + sizeof(SVGA3dCopyBox) * num_hits +
- sizeof(SVGA3dCmdSurfaceDMASuffix) +
- sizeof(struct vmw_stdu_update);
-}
-
static uint32_t vmw_stdu_bo_fifo_size_cpu(struct vmw_du_update_plane *update,
uint32_t num_hits)
{
@@ -1189,68 +1031,6 @@ static uint32_t vmw_stdu_bo_fifo_size_cpu(struct vmw_du_update_plane *update,
sizeof(struct vmw_stdu_update);
}
-static uint32_t vmw_stdu_bo_populate_dma(struct vmw_du_update_plane *update,
- void *cmd, uint32_t num_hits)
-{
- struct vmw_screen_target_display_unit *stdu;
- struct vmw_framebuffer_bo *vfbbo;
- struct vmw_stdu_dma *cmd_dma = cmd;
-
- stdu = container_of(update->du, typeof(*stdu), base);
- vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
-
- cmd_dma->header.id = SVGA_3D_CMD_SURFACE_DMA;
- cmd_dma->header.size = sizeof(cmd_dma->body) +
- sizeof(struct SVGA3dCopyBox) * num_hits +
- sizeof(SVGA3dCmdSurfaceDMASuffix);
- vmw_bo_get_guest_ptr(&vfbbo->buffer->base, &cmd_dma->body.guest.ptr);
- cmd_dma->body.guest.pitch = update->vfb->base.pitches[0];
- cmd_dma->body.host.sid = stdu->display_srf->res.id;
- cmd_dma->body.host.face = 0;
- cmd_dma->body.host.mipmap = 0;
- cmd_dma->body.transfer = SVGA3D_WRITE_HOST_VRAM;
-
- return sizeof(*cmd_dma);
-}
-
-static uint32_t vmw_stdu_bo_populate_clip(struct vmw_du_update_plane *update,
- void *cmd, struct drm_rect *clip,
- uint32_t fb_x, uint32_t fb_y)
-{
- struct SVGA3dCopyBox *box = cmd;
-
- box->srcx = fb_x;
- box->srcy = fb_y;
- box->srcz = 0;
- box->x = clip->x1;
- box->y = clip->y1;
- box->z = 0;
- box->w = drm_rect_width(clip);
- box->h = drm_rect_height(clip);
- box->d = 1;
-
- return sizeof(*box);
-}
-
-static uint32_t vmw_stdu_bo_populate_update(struct vmw_du_update_plane *update,
- void *cmd, struct drm_rect *bb)
-{
- struct vmw_screen_target_display_unit *stdu;
- struct vmw_framebuffer_bo *vfbbo;
- SVGA3dCmdSurfaceDMASuffix *suffix = cmd;
-
- stdu = container_of(update->du, typeof(*stdu), base);
- vfbbo = container_of(update->vfb, typeof(*vfbbo), base);
-
- suffix->suffixSize = sizeof(*suffix);
- suffix->maximumOffset = vfbbo->buffer->base.base.size;
-
- vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2,
- bb->y1, bb->y2);
-
- return sizeof(*suffix) + sizeof(struct vmw_stdu_update);
-}
-
static uint32_t vmw_stdu_bo_pre_clip_cpu(struct vmw_du_update_plane *update,
void *cmd, uint32_t num_hits)
{
@@ -1300,11 +1080,11 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
diff.cpp = stdu->cpp;
- dst_bo = &stdu->display_srf->res.backup->base;
+ dst_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
- src_bo = &vfbbo->buffer->base;
+ src_bo = &vfbbo->buffer->tbo;
src_pitch = update->vfb->base.pitches[0];
src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
stdu->cpp;
@@ -1368,24 +1148,12 @@ static int vmw_stdu_plane_update_bo(struct vmw_private *dev_priv,
bo_update.base.vfb = vfb;
bo_update.base.out_fence = out_fence;
bo_update.base.mutex = NULL;
- bo_update.base.cpu_blit = vmw_stdu_use_cpu_blit(dev_priv);
bo_update.base.intr = false;
- /*
- * VM without 3D support don't have surface DMA command and framebuffer
- * should be moved out of VRAM.
- */
- if (bo_update.base.cpu_blit) {
- bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size_cpu;
- bo_update.base.pre_clip = vmw_stdu_bo_pre_clip_cpu;
- bo_update.base.clip = vmw_stdu_bo_clip_cpu;
- bo_update.base.post_clip = vmw_stdu_bo_populate_update_cpu;
- } else {
- bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size;
- bo_update.base.pre_clip = vmw_stdu_bo_populate_dma;
- bo_update.base.clip = vmw_stdu_bo_populate_clip;
- bo_update.base.post_clip = vmw_stdu_bo_populate_update;
- }
+ bo_update.base.calc_fifo_size = vmw_stdu_bo_fifo_size_cpu;
+ bo_update.base.pre_clip = vmw_stdu_bo_pre_clip_cpu;
+ bo_update.base.clip = vmw_stdu_bo_clip_cpu;
+ bo_update.base.post_clip = vmw_stdu_bo_populate_update_cpu;
return vmw_du_helper_plane_update(&bo_update.base);
}
@@ -1548,7 +1316,6 @@ static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv,
srf_update.vfb = vfb;
srf_update.out_fence = out_fence;
srf_update.mutex = &dev_priv->cmdbuf_mutex;
- srf_update.cpu_blit = false;
srf_update.intr = true;
if (vfbs->is_bo_proxy)
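
In the CPU-blit commit path above, the display surface is now always the source and the framebuffer bo the destination, and each byte offset is computed from the pitch of its own side of the copy. An illustrative sketch of that arithmetic, using the fields and helpers from the hunks (the wrapper function itself is hypothetical):

	static int example_readback_blit(struct vmw_screen_target_display_unit *stdu,
					 struct vmw_stdu_dirty *ddirty,
					 s32 width, s32 height)
	{
		struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(stdu->cpp);
		u32 src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
		u32 dst_pitch = ddirty->pitch;
		/* Each offset is derived from the pitch of its own buffer. */
		u32 src_offset = ddirty->top * src_pitch + ddirty->left * stdu->cpp;
		u32 dst_offset = ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;

		return vmw_bo_cpu_blit(&ddirty->buf->tbo, dst_offset, dst_pitch,
				       &stdu->display_srf->res.guest_memory_bo->tbo,
				       src_offset, src_pitch,
				       width * stdu->cpp, height, &diff);
	}
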
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
index 2de97419d5c9..edcc40659038 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2018-2019 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2018-2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -26,11 +26,12 @@
*
**************************************************************************/
-#include <drm/ttm/ttm_placement.h>
-
+#include "vmwgfx_binding.h"
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
-#include "vmwgfx_binding.h"
+
+#include <drm/ttm/ttm_placement.h>
/**
* struct vmw_dx_streamoutput - Streamoutput resource metadata.
@@ -62,10 +63,11 @@ static void vmw_dx_streamoutput_commit_notify(struct vmw_resource *res,
static const struct vmw_res_func vmw_dx_streamoutput_func = {
.res_type = vmw_res_streamoutput,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = false,
.type_name = "DX streamoutput",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_dx_streamoutput_create,
.destroy = NULL, /* Command buffer managed resource. */
.bind = vmw_dx_streamoutput_bind,
@@ -104,8 +106,8 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
cmd->header.size = sizeof(cmd->body);
cmd->body.soid = so->id;
- cmd->body.mobid = res->backup->base.resource->start;
- cmd->body.offsetInBytes = res->backup_offset;
+ cmd->body.mobid = res->guest_memory_bo->tbo.resource->start;
+ cmd->body.offsetInBytes = res->guest_memory_offset;
cmd->body.sizeInBytes = so->size;
vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -195,7 +197,7 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
struct vmw_fence_obj *fence;
int ret;
- if (WARN_ON(res->backup->base.resource->mem_type != VMW_PL_MOB))
+ if (WARN_ON(res->guest_memory_bo->tbo.resource->mem_type != VMW_PL_MOB))
return -EINVAL;
mutex_lock(&dev_priv->binding_mutex);
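
The bind/unscrub paths above all share the same FIFO pattern: reserve a typed command, point it at the MOB now backing the resource, and commit. An illustrative sketch following the streamoutput hunk (the wrapper is hypothetical; the command layout and helpers are used as in the surrounding code):

	static int example_bind_streamoutput(struct vmw_private *dev_priv,
					     struct vmw_resource *res,
					     u32 soid, u32 size)
	{
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdDXBindStreamOutput body;
		} *cmd;

		cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
		if (!cmd)
			return -ENOMEM;

		cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
		cmd->header.size = sizeof(cmd->body);
		cmd->body.soid = soid;
		/* The mob id comes from the bo's TTM resource start, as above. */
		cmd->body.mobid = res->guest_memory_bo->tbo.resource->start;
		cmd->body.offsetInBytes = res->guest_memory_offset;
		cmd->body.sizeInBytes = size;

		vmw_cmd_commit(dev_priv, sizeof(*cmd));
		return 0;
	}
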
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index dcfb003841b3..5db403ee8261 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,8 +25,7 @@
*
**************************************************************************/
-#include <drm/ttm/ttm_placement.h>
-
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_so.h"
@@ -34,6 +33,8 @@
#include "vmw_surface_cache.h"
#include "device_include/svga3d_surfacedefs.h"
+#include <drm/ttm/ttm_placement.h>
+
#define SVGA3D_FLAGS_64(upper32, lower32) (((uint64_t)upper32 << 32) | lower32)
#define SVGA3D_FLAGS_UPPER_32(svga3d_flags) (svga3d_flags >> 32)
#define SVGA3D_FLAGS_LOWER_32(svga3d_flags) \
@@ -125,12 +126,13 @@ const struct vmw_user_resource_conv *user_surface_converter =
static const struct vmw_res_func vmw_legacy_surface_func = {
.res_type = vmw_res_surface,
- .needs_backup = false,
+ .needs_guest_memory = false,
.may_evict = true,
.prio = 1,
.dirty_prio = 1,
.type_name = "legacy surfaces",
- .backup_placement = &vmw_srf_placement,
+ .domain = VMW_BO_DOMAIN_GMR,
+ .busy_domain = VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
.create = &vmw_legacy_srf_create,
.destroy = &vmw_legacy_srf_destroy,
.bind = &vmw_legacy_srf_bind,
@@ -139,12 +141,13 @@ static const struct vmw_res_func vmw_legacy_surface_func = {
static const struct vmw_res_func vmw_gb_surface_func = {
.res_type = vmw_res_surface,
- .needs_backup = true,
+ .needs_guest_memory = true,
.may_evict = true,
.prio = 1,
.dirty_prio = 2,
.type_name = "guest backed surfaces",
- .backup_placement = &vmw_mob_placement,
+ .domain = VMW_BO_DOMAIN_MOB,
+ .busy_domain = VMW_BO_DOMAIN_MOB,
.create = vmw_gb_surface_create,
.destroy = vmw_gb_surface_destroy,
.bind = vmw_gb_surface_bind,
@@ -379,7 +382,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
*/
mutex_lock(&dev_priv->cmdbuf_mutex);
- dev_priv->used_memory_size -= res->backup_size;
+ dev_priv->used_memory_size -= res->guest_memory_size;
mutex_unlock(&dev_priv->cmdbuf_mutex);
}
}
@@ -409,7 +412,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
return 0;
srf = vmw_res_to_srf(res);
- if (unlikely(dev_priv->used_memory_size + res->backup_size >=
+ if (unlikely(dev_priv->used_memory_size + res->guest_memory_size >=
dev_priv->memory_size))
return -EBUSY;
@@ -447,7 +450,7 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
* Surface memory usage accounting.
*/
- dev_priv->used_memory_size += res->backup_size;
+ dev_priv->used_memory_size += res->guest_memory_size;
return 0;
out_no_fifo:
@@ -524,7 +527,7 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
static int vmw_legacy_srf_bind(struct vmw_resource *res,
struct ttm_validate_buffer *val_buf)
{
- if (!res->backup_dirty)
+ if (!res->guest_memory_dirty)
return 0;
return vmw_legacy_srf_dma(res, val_buf, true);
@@ -583,7 +586,7 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
* Surface memory usage accounting.
*/
- dev_priv->used_memory_size -= res->backup_size;
+ dev_priv->used_memory_size -= res->guest_memory_size;
/*
* Release the surface ID.
@@ -683,8 +686,8 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
container_of(base, struct vmw_user_surface, prime.base);
struct vmw_resource *res = &user_srf->srf.res;
- if (res && res->backup)
- drm_gem_object_put(&res->backup->base.base);
+ if (res->guest_memory_bo)
+ drm_gem_object_put(&res->guest_memory_bo->tbo.base);
*p_base = NULL;
vmw_resource_unreference(&res);
@@ -812,7 +815,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
++cur_size;
}
}
- res->backup_size = cur_bo_offset;
+ res->guest_memory_size = cur_bo_offset;
if (metadata->scanout &&
metadata->num_sizes == 1 &&
metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH &&
@@ -856,14 +859,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
ret = vmw_gem_object_create_with_handle(dev_priv,
file_priv,
- res->backup_size,
+ res->guest_memory_size,
&backup_handle,
- &res->backup);
+ &res->guest_memory_bo);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
}
- vmw_bo_reference(res->backup);
+ vmw_bo_reference(res->guest_memory_bo);
/*
* We don't expose the handle to the userspace and surface
* already holds a gem reference
@@ -872,7 +875,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
tmp = vmw_resource_reference(&srf->res);
- ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
req->shareable, VMW_RES_SURFACE,
&vmw_user_surface_base_release);
@@ -1186,7 +1189,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
- submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
+ submit_size = sizeof(*cmd1) + (res->guest_memory_dirty ? sizeof(*cmd2) : 0);
cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd1))
@@ -1196,7 +1199,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.sid = res->id;
cmd1->body.mobid = bo->resource->start;
- if (res->backup_dirty) {
+ if (res->guest_memory_dirty) {
cmd2 = (void *) &cmd1[1];
cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;
cmd2->header.size = sizeof(cmd2->body);
@@ -1204,12 +1207,12 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
}
vmw_cmd_commit(dev_priv, submit_size);
- if (res->backup->dirty && res->backup_dirty) {
+ if (res->guest_memory_bo->dirty && res->guest_memory_dirty) {
		/* We've just made a full upload. Clear dirty regions. */
vmw_bo_dirty_clear_res(res);
}
- res->backup_dirty = false;
+ res->guest_memory_dirty = false;
return 0;
}
@@ -1505,11 +1508,11 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
- &res->backup);
+ &res->guest_memory_bo);
if (ret == 0) {
- if (res->backup->base.base.size < res->backup_size) {
+ if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n");
- vmw_bo_unreference(&res->backup);
+ vmw_bo_unreference(&res->guest_memory_bo);
ret = -EINVAL;
goto out_unlock;
} else {
@@ -1520,11 +1523,11 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
(drm_vmw_surface_flag_create_buffer |
drm_vmw_surface_flag_coherent)) {
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
- res->backup_size,
+ res->guest_memory_size,
&backup_handle,
- &res->backup);
+ &res->guest_memory_bo);
if (ret == 0)
- vmw_bo_reference(res->backup);
+ vmw_bo_reference(res->guest_memory_bo);
}
if (unlikely(ret != 0)) {
@@ -1533,9 +1536,9 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) {
- struct vmw_buffer_object *backup = res->backup;
+ struct vmw_bo *backup = res->guest_memory_bo;
- ttm_bo_reserve(&backup->base, false, false, NULL);
+ ttm_bo_reserve(&backup->tbo, false, false, NULL);
if (!res->func->dirty_alloc)
ret = -EINVAL;
if (!ret)
@@ -1544,7 +1547,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
res->coherent = true;
ret = res->func->dirty_alloc(res);
}
- ttm_bo_unreserve(&backup->base);
+ ttm_bo_unreserve(&backup->tbo);
if (ret) {
vmw_resource_unreference(&res);
goto out_unlock;
@@ -1553,7 +1556,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
tmp = vmw_resource_reference(res);
- ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+ ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
req->base.drm_surface_flags &
drm_vmw_surface_flag_shareable,
VMW_RES_SURFACE,
@@ -1566,11 +1569,11 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
}
rep->handle = user_srf->prime.base.handle;
- rep->backup_size = res->backup_size;
- if (res->backup) {
+ rep->backup_size = res->guest_memory_size;
+ if (res->guest_memory_bo) {
rep->buffer_map_handle =
- drm_vma_node_offset_addr(&res->backup->base.base.vma_node);
- rep->buffer_size = res->backup->base.base.size;
+ drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node);
+ rep->buffer_size = res->guest_memory_bo->tbo.base.size;
rep->buffer_handle = backup_handle;
} else {
rep->buffer_map_handle = 0;
@@ -1613,14 +1616,14 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
user_srf = container_of(base, struct vmw_user_surface, prime.base);
srf = &user_srf->srf;
- if (!srf->res.backup) {
+ if (!srf->res.guest_memory_bo) {
DRM_ERROR("Shared GB surface is missing a backup buffer.\n");
goto out_bad_resource;
}
metadata = &srf->metadata;
mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */
- ret = drm_gem_handle_create(file_priv, &srf->res.backup->base.base,
+ ret = drm_gem_handle_create(file_priv, &srf->res.guest_memory_bo->tbo.base,
&backup_handle);
mutex_unlock(&dev_priv->cmdbuf_mutex);
if (ret != 0) {
@@ -1639,11 +1642,11 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
rep->creq.base.buffer_handle = backup_handle;
rep->creq.base.base_size = metadata->base_size;
rep->crep.handle = user_srf->prime.base.handle;
- rep->crep.backup_size = srf->res.backup_size;
+ rep->crep.backup_size = srf->res.guest_memory_size;
rep->crep.buffer_handle = backup_handle;
rep->crep.buffer_map_handle =
- drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node);
- rep->crep.buffer_size = srf->res.backup->base.base.size;
+ drm_vma_node_offset_addr(&srf->res.guest_memory_bo->tbo.base.vma_node);
+ rep->crep.buffer_size = srf->res.guest_memory_bo->tbo.base.size;
rep->creq.version = drm_vmw_gb_surface_v1;
rep->creq.svga3d_flags_upper_32_bits =
@@ -1742,12 +1745,12 @@ static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
{
struct vmw_surface_dirty *dirty =
(struct vmw_surface_dirty *) res->dirty;
- size_t backup_end = res->backup_offset + res->backup_size;
+ size_t backup_end = res->guest_memory_offset + res->guest_memory_size;
struct vmw_surface_loc loc1, loc2;
const struct vmw_surface_cache *cache;
- start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
- end = min(end, backup_end) - res->backup_offset;
+ start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
+ end = min(end, backup_end) - res->guest_memory_offset;
cache = &dirty->cache;
vmw_surface_get_loc(cache, &loc1, start);
vmw_surface_get_loc(cache, &loc2, end - 1);
@@ -1794,13 +1797,13 @@ static void vmw_surface_buf_dirty_range_add(struct vmw_resource *res,
struct vmw_surface_dirty *dirty =
(struct vmw_surface_dirty *) res->dirty;
const struct vmw_surface_cache *cache = &dirty->cache;
- size_t backup_end = res->backup_offset + cache->mip_chain_bytes;
+ size_t backup_end = res->guest_memory_offset + cache->mip_chain_bytes;
SVGA3dBox *box = &dirty->boxes[0];
u32 box_c2;
box->h = box->d = 1;
- start = max_t(size_t, start, res->backup_offset) - res->backup_offset;
- end = min(end, backup_end) - res->backup_offset;
+ start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset;
+ end = min(end, backup_end) - res->guest_memory_offset;
box_c2 = box->x + box->w;
if (box->w == 0 || box->x > start)
box->x = start;
@@ -1816,8 +1819,8 @@ static void vmw_surface_dirty_range_add(struct vmw_resource *res, size_t start,
{
struct vmw_surface *srf = vmw_res_to_srf(res);
- if (WARN_ON(end <= res->backup_offset ||
- start >= res->backup_offset + res->backup_size))
+ if (WARN_ON(end <= res->guest_memory_offset ||
+ start >= res->guest_memory_offset + res->guest_memory_size))
return;
if (srf->metadata.format == SVGA3D_BUFFER)
@@ -2074,7 +2077,7 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE)
sample_count = metadata->multisample_count;
- srf->res.backup_size =
+ srf->res.guest_memory_size =
vmw_surface_get_serialized_size_extended(
metadata->format,
metadata->base_size,
@@ -2083,7 +2086,7 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
sample_count);
if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
- srf->res.backup_size += sizeof(SVGA3dDXSOState);
+ srf->res.guest_memory_size += sizeof(SVGA3dDXSOState);
/*
* Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 856a352a72a6..af8562c95cc3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
+ * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
@@ -25,6 +25,7 @@
*
**************************************************************************/
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
@@ -49,13 +50,6 @@ static const struct ttm_place gmr_placement_flags = {
.flags = 0
};
-static const struct ttm_place mob_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_MOB,
- .flags = 0
-};
-
struct ttm_placement vmw_vram_placement = {
.num_placement = 1,
.placement = &vram_placement_flags,
@@ -77,27 +71,6 @@ static const struct ttm_place vram_gmr_placement_flags[] = {
}
};
-static const struct ttm_place gmr_vram_placement_flags[] = {
- {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_VRAM,
- .flags = 0
- }
-};
-
-static const struct ttm_place vmw_sys_placement_flags = {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_SYSTEM,
- .flags = 0
-};
-
struct ttm_placement vmw_vram_gmr_placement = {
.num_placement = 2,
.placement = vram_gmr_placement_flags,
@@ -105,13 +78,6 @@ struct ttm_placement vmw_vram_gmr_placement = {
.busy_placement = &gmr_placement_flags
};
-struct ttm_placement vmw_vram_sys_placement = {
- .num_placement = 1,
- .placement = &vram_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags
-};
-
struct ttm_placement vmw_sys_placement = {
.num_placement = 1,
.placement = &sys_placement_flags,
@@ -119,53 +85,6 @@ struct ttm_placement vmw_sys_placement = {
.busy_placement = &sys_placement_flags
};
-struct ttm_placement vmw_pt_sys_placement = {
- .num_placement = 1,
- .placement = &vmw_sys_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &vmw_sys_placement_flags
-};
-
-static const struct ttm_place nonfixed_placement_flags[] = {
- {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = TTM_PL_SYSTEM,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_GMR,
- .flags = 0
- }, {
- .fpfn = 0,
- .lpfn = 0,
- .mem_type = VMW_PL_MOB,
- .flags = 0
- }
-};
-
-struct ttm_placement vmw_srf_placement = {
- .num_placement = 1,
- .num_busy_placement = 2,
- .placement = &gmr_placement_flags,
- .busy_placement = gmr_vram_placement_flags
-};
-
-struct ttm_placement vmw_mob_placement = {
- .num_placement = 1,
- .num_busy_placement = 1,
- .placement = &mob_placement_flags,
- .busy_placement = &mob_placement_flags
-};
-
-struct ttm_placement vmw_nonfixed_placement = {
- .num_placement = 3,
- .placement = nonfixed_placement_flags,
- .num_busy_placement = 1,
- .busy_placement = &sys_placement_flags
-};
-
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/**
@@ -508,7 +427,7 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
if (!vmw_be)
return NULL;
- vmw_be->dev_priv = container_of(bo->bdev, struct vmw_private, bdev);
+ vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
vmw_be->mob = NULL;
if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
@@ -534,7 +453,7 @@ static void vmw_evict_flags(struct ttm_buffer_object *bo,
static int vmw_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
- struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);
+ struct vmw_private *dev_priv = vmw_priv_from_ttm(bdev);
switch (mem->mem_type) {
case TTM_PL_SYSTEM:
@@ -596,9 +515,13 @@ static int vmw_move(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
- struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
- struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
- int ret;
+ struct ttm_resource_manager *new_man;
+ struct ttm_resource_manager *old_man = NULL;
+ int ret = 0;
+
+ new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
+ if (bo->resource)
+ old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
if (new_man->use_tt && !vmw_memtype_is_system(new_mem->mem_type)) {
ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem);
@@ -606,9 +529,15 @@ static int vmw_move(struct ttm_buffer_object *bo,
return ret;
}
+ if (!bo->resource || (bo->resource->mem_type == TTM_PL_SYSTEM &&
+ bo->ttm == NULL)) {
+ ttm_bo_move_null(bo, new_mem);
+ return 0;
+ }
+
vmw_move_notify(bo, bo->resource, new_mem);
- if (old_man->use_tt && new_man->use_tt) {
+ if (old_man && old_man->use_tt && new_man->use_tt) {
if (vmw_memtype_is_system(bo->resource->mem_type)) {
ttm_bo_move_null(bo, new_mem);
return 0;
@@ -645,34 +574,39 @@ struct ttm_device_funcs vmw_bo_driver = {
};
int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
- unsigned long bo_size,
- struct ttm_buffer_object **bo_p)
+ size_t bo_size, u32 domain,
+ struct vmw_bo **bo_p)
{
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
};
- struct ttm_buffer_object *bo;
+ struct vmw_bo *vbo;
int ret;
+ struct vmw_bo_params bo_params = {
+ .domain = domain,
+ .busy_domain = domain,
+ .bo_type = ttm_bo_type_kernel,
+ .size = bo_size,
+ .pin = true
+ };
- ret = vmw_bo_create_kernel(dev_priv, bo_size,
- &vmw_pt_sys_placement,
- &bo);
+ ret = vmw_bo_create(dev_priv, &bo_params, &vbo);
if (unlikely(ret != 0))
return ret;
- ret = ttm_bo_reserve(bo, false, true, NULL);
+ ret = ttm_bo_reserve(&vbo->tbo, false, true, NULL);
BUG_ON(ret != 0);
- ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
+ ret = vmw_ttm_populate(vbo->tbo.bdev, vbo->tbo.ttm, &ctx);
if (likely(ret == 0)) {
struct vmw_ttm_tt *vmw_tt =
- container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
+ container_of(vbo->tbo.ttm, struct vmw_ttm_tt, dma_ttm);
ret = vmw_ttm_map_dma(vmw_tt);
}
- ttm_bo_unreserve(bo);
+ ttm_bo_unreserve(&vbo->tbo);
if (likely(ret == 0))
- *bo_p = bo;
+ *bo_p = vbo;
return ret;
}
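
A minimal sketch of the reworked helper's calling convention, assuming a valid dev_priv and using VMW_BO_DOMAIN_SYS purely as an illustrative domain (error paths abbreviated):

    struct vmw_bo *vbo;
    int ret;

    /* One pinned kernel page; callers pick whatever VMW_BO_DOMAIN_* fits. */
    ret = vmw_bo_create_and_populate(dev_priv, PAGE_SIZE,
                                     VMW_BO_DOMAIN_SYS, &vbo);
    if (ret)
            return ret;
    /* ... use vbo->tbo, then unpin/unreference when done ... */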
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
index 6ad744ae07f5..d140089e53d4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_va.c
@@ -25,6 +25,7 @@
*
**************************************************************************/
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
@@ -80,10 +81,11 @@ static void vmw_stream_set_arg_handle(void *data, u32 handle)
static const struct vmw_simple_resource_func va_stream_func = {
.res_func = {
.res_type = vmw_res_stream,
- .needs_backup = false,
+ .needs_guest_memory = false,
.may_evict = false,
.type_name = "overlay stream",
- .backup_placement = NULL,
+ .domain = VMW_BO_DOMAIN_SYS,
+ .busy_domain = VMW_BO_DOMAIN_SYS,
.create = NULL,
.destroy = NULL,
.bind = NULL,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index f5c4a40fb16d..aaacbdcbd742 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
*
- * Copyright © 2018 - 2022 VMware, Inc., Palo Alto, CA., USA
+ * Copyright © 2018 - 2023 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -25,9 +25,12 @@
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
-#include <linux/slab.h>
-#include "vmwgfx_validation.h"
+#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
+#include "vmwgfx_resource_priv.h"
+#include "vmwgfx_validation.h"
+
+#include <linux/slab.h>
#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
@@ -38,8 +41,6 @@
* @hash: A hash entry used for the duplicate detection hash table.
* @coherent_count: If switching backup buffers, number of new coherent
* resources that will have this buffer as a backup buffer.
- * @as_mob: Validate as mob.
- * @cpu_blit: Validate for cpu blit access.
*
* Bit fields are used since these structures are allocated and freed in
* large numbers and space conservation is desired.
@@ -48,21 +49,19 @@ struct vmw_validation_bo_node {
struct ttm_validate_buffer base;
struct vmwgfx_hash_item hash;
unsigned int coherent_count;
- u32 as_mob : 1;
- u32 cpu_blit : 1;
};
/**
* struct vmw_validation_res_node - Resource validation metadata.
* @head: List head for the resource validation list.
* @hash: A hash entry used for the duplicate detection hash table.
* @res: Reference counted resource pointer.
- * @new_backup: Non ref-counted pointer to new backup buffer to be assigned
- * to a resource.
- * @new_backup_offset: Offset into the new backup mob for resources that can
- * share MOBs.
+ * @new_guest_memory_bo: Non ref-counted pointer to new guest memory buffer
+ * to be assigned to a resource.
+ * @new_guest_memory_offset: Offset into the new backup mob for resources
+ * that can share MOBs.
* @no_buffer_needed: Kernel does not need to allocate a MOB during validation,
* the command stream provides a mob bind operation.
- * @switching_backup: The validation process is switching backup MOB.
+ * @switching_guest_memory_bo: The validation process is switching backup MOB.
* @first_usage: True iff the resource has been seen only once in the current
* validation batch.
* @reserved: Whether the resource is currently reserved by this process.
@@ -77,10 +76,10 @@ struct vmw_validation_res_node {
struct list_head head;
struct vmwgfx_hash_item hash;
struct vmw_resource *res;
- struct vmw_buffer_object *new_backup;
- unsigned long new_backup_offset;
+ struct vmw_bo *new_guest_memory_bo;
+ unsigned long new_guest_memory_offset;
u32 no_buffer_needed : 1;
- u32 switching_backup : 1;
+ u32 switching_guest_memory_bo : 1;
u32 first_usage : 1;
u32 reserved : 1;
u32 dirty : 1;
@@ -173,7 +172,7 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx)
*/
static struct vmw_validation_bo_node *
vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
- struct vmw_buffer_object *vbo)
+ struct vmw_bo *vbo)
{
struct vmw_validation_bo_node *bo_node = NULL;
@@ -194,7 +193,7 @@ vmw_validation_find_bo_dup(struct vmw_validation_context *ctx,
struct vmw_validation_bo_node *entry;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
- if (entry->base.bo == &vbo->base) {
+ if (entry->base.bo == &vbo->tbo) {
bo_node = entry;
break;
}
@@ -258,26 +257,16 @@ out:
* vmw_validation_add_bo - Add a buffer object to the validation context.
* @ctx: The validation context.
* @vbo: The buffer object.
- * @as_mob: Validate as mob, otherwise suitable for GMR operations.
- * @cpu_blit: Validate in a page-mappable location.
*
* Return: Zero on success, negative error code otherwise.
*/
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
- struct vmw_buffer_object *vbo,
- bool as_mob,
- bool cpu_blit)
+ struct vmw_bo *vbo)
{
struct vmw_validation_bo_node *bo_node;
bo_node = vmw_validation_find_bo_dup(ctx, vbo);
- if (bo_node) {
- if (bo_node->as_mob != as_mob ||
- bo_node->cpu_blit != cpu_blit) {
- DRM_ERROR("Inconsistent buffer usage.\n");
- return -EINVAL;
- }
- } else {
+ if (!bo_node) {
struct ttm_validate_buffer *val_buf;
bo_node = vmw_validation_mem_alloc(ctx, sizeof(*bo_node));
@@ -290,13 +279,11 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
bo_node->hash.key);
}
val_buf = &bo_node->base;
- val_buf->bo = ttm_bo_get_unless_zero(&vbo->base);
+ val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo);
if (!val_buf->bo)
return -ESRCH;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &ctx->bo_list);
- bo_node->as_mob = as_mob;
- bo_node->cpu_blit = cpu_blit;
}
return 0;
@@ -406,23 +393,23 @@ void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
* the resource.
* @vbo: The new backup buffer object MOB. This buffer object needs to have
* already been registered with the validation context.
- * @backup_offset: Offset into the new backup MOB.
+ * @guest_memory_offset: Offset into the new backup MOB.
*/
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
void *val_private,
- struct vmw_buffer_object *vbo,
- unsigned long backup_offset)
+ struct vmw_bo *vbo,
+ unsigned long guest_memory_offset)
{
struct vmw_validation_res_node *val;
val = container_of(val_private, typeof(*val), private);
- val->switching_backup = 1;
+ val->switching_guest_memory_bo = 1;
if (val->first_usage)
val->no_buffer_needed = 1;
- val->new_backup = vbo;
- val->new_backup_offset = backup_offset;
+ val->new_guest_memory_bo = vbo;
+ val->new_guest_memory_offset = guest_memory_offset;
}
/**
@@ -450,21 +437,22 @@ int vmw_validation_res_reserve(struct vmw_validation_context *ctx,
goto out_unreserve;
val->reserved = 1;
- if (res->backup) {
- struct vmw_buffer_object *vbo = res->backup;
+ if (res->guest_memory_bo) {
+ struct vmw_bo *vbo = res->guest_memory_bo;
- ret = vmw_validation_add_bo
- (ctx, vbo, vmw_resource_needs_backup(res),
- false);
+ vmw_bo_placement_set(vbo,
+ res->func->domain,
+ res->func->busy_domain);
+ ret = vmw_validation_add_bo(ctx, vbo);
if (ret)
goto out_unreserve;
}
- if (val->switching_backup && val->new_backup &&
+ if (val->switching_guest_memory_bo && val->new_guest_memory_bo &&
res->coherent) {
struct vmw_validation_bo_node *bo_node =
vmw_validation_find_bo_dup(ctx,
- val->new_backup);
+ val->new_guest_memory_bo);
if (WARN_ON(!bo_node)) {
ret = -EINVAL;
@@ -507,9 +495,9 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
vmw_resource_unreserve(val->res,
val->dirty_set,
val->dirty,
- val->switching_backup,
- val->new_backup,
- val->new_backup_offset);
+ val->switching_guest_memory_bo,
+ val->new_guest_memory_bo,
+ val->new_guest_memory_offset);
}
}
@@ -517,17 +505,14 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
* vmw_validation_bo_validate_single - Validate a single buffer object.
* @bo: The TTM buffer object base.
* @interruptible: Whether to perform waits interruptible if possible.
- * @validate_as_mob: Whether to validate in MOB memory.
*
* Return: Zero on success, -ERESTARTSYS if interrupted. Negative error
* code on failure.
*/
-int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
- bool interruptible,
- bool validate_as_mob)
+static int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
+ bool interruptible)
{
- struct vmw_buffer_object *vbo =
- container_of(bo, struct vmw_buffer_object, base);
+ struct vmw_bo *vbo = to_vmw_bo(&bo->base);
struct ttm_operation_ctx ctx = {
.interruptible = interruptible,
.no_wait_gpu = false
@@ -537,30 +522,20 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
if (atomic_read(&vbo->cpu_writers))
return -EBUSY;
- if (vbo->base.pin_count > 0)
+ if (vbo->tbo.pin_count > 0)
return 0;
- if (validate_as_mob)
- return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
-
- /**
- * Put BO in VRAM if there is space, otherwise as a GMR.
- * If there is no space in VRAM and GMR ids are all used up,
- * start evicting GMRs to make room. If the DMA buffer can't be
- * used as a GMR, this will return -ENOMEM.
- */
-
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
+ ret = ttm_bo_validate(bo, &vbo->placement, &ctx);
if (ret == 0 || ret == -ERESTARTSYS)
return ret;
- /**
- * If that failed, try VRAM again, this time evicting
+ /*
+ * If that failed, try again, this time evicting
* previous contents.
*/
+ ctx.allow_res_evict = true;
- ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
- return ret;
+ return ttm_bo_validate(bo, &vbo->placement, &ctx);
}
/**
@@ -578,21 +553,10 @@ int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr)
int ret;
list_for_each_entry(entry, &ctx->bo_list, base.head) {
- struct vmw_buffer_object *vbo =
- container_of(entry->base.bo, typeof(*vbo), base);
-
- if (entry->cpu_blit) {
- struct ttm_operation_ctx ttm_ctx = {
- .interruptible = intr,
- .no_wait_gpu = false
- };
-
- ret = ttm_bo_validate(entry->base.bo,
- &vmw_nonfixed_placement, &ttm_ctx);
- } else {
- ret = vmw_validation_bo_validate_single
- (entry->base.bo, intr, entry->as_mob);
- }
+ struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);
+
+ ret = vmw_validation_bo_validate_single(entry->base.bo, intr);
+
if (ret)
return ret;
@@ -639,7 +603,7 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
list_for_each_entry(val, &ctx->resource_list, head) {
struct vmw_resource *res = val->res;
- struct vmw_buffer_object *backup = res->backup;
+ struct vmw_bo *backup = res->guest_memory_bo;
ret = vmw_resource_validate(res, intr, val->dirty_set &&
val->dirty);
@@ -650,12 +614,12 @@ int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr)
}
/* Check if the resource switched backup buffer */
- if (backup && res->backup && (backup != res->backup)) {
- struct vmw_buffer_object *vbo = res->backup;
+ if (backup && res->guest_memory_bo && backup != res->guest_memory_bo) {
+ struct vmw_bo *vbo = res->guest_memory_bo;
- ret = vmw_validation_add_bo
- (ctx, vbo, vmw_resource_needs_backup(res),
- false);
+ vmw_bo_placement_set(vbo, res->func->domain,
+ res->func->busy_domain);
+ ret = vmw_validation_add_bo(ctx, vbo);
if (ret)
return ret;
}
@@ -889,9 +853,7 @@ void vmw_validation_bo_backoff(struct vmw_validation_context *ctx)
list_for_each_entry(entry, &ctx->bo_list, base.head) {
if (entry->coherent_count) {
unsigned int coherent_count = entry->coherent_count;
- struct vmw_buffer_object *vbo =
- container_of(entry->base.bo, typeof(*vbo),
- base);
+ struct vmw_bo *vbo = to_vmw_bo(&entry->base.bo->base);
while (coherent_count--)
vmw_bo_dirty_release(vbo);
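
With the as_mob and cpu_blit flags gone, callers now stamp the desired placement onto the buffer itself before handing it to the validation context; a sketch of the new pattern, assuming ctx and vbo already exist:

    vmw_bo_placement_set(vbo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
    ret = vmw_validation_add_bo(ctx, vbo);
    if (ret)
            return ret;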
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index ab9ec226f433..240ee0c4ebfd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -73,7 +73,7 @@ struct vmw_validation_context {
size_t total_mem;
};
-struct vmw_buffer_object;
+struct vmw_bo;
struct vmw_resource;
struct vmw_fence_obj;
@@ -159,11 +159,7 @@ static inline unsigned int vmw_validation_align(unsigned int val)
}
int vmw_validation_add_bo(struct vmw_validation_context *ctx,
- struct vmw_buffer_object *vbo,
- bool as_mob, bool cpu_blit);
-int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo,
- bool interruptible,
- bool validate_as_mob);
+ struct vmw_bo *vbo);
int vmw_validation_bo_validate(struct vmw_validation_context *ctx, bool intr);
void vmw_validation_unref_lists(struct vmw_validation_context *ctx);
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
@@ -179,7 +175,7 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
bool backoff);
void vmw_validation_res_switch_backup(struct vmw_validation_context *ctx,
void *val_private,
- struct vmw_buffer_object *vbo,
+ struct vmw_bo *vbo,
unsigned long backup_offset);
int vmw_validation_res_validate(struct vmw_validation_context *ctx, bool intr);
diff --git a/drivers/ps3/ps3av.c b/drivers/ps3/ps3av.c
index 516e6d14d32e..f6c9e56bdba7 100644
--- a/drivers/ps3/ps3av.c
+++ b/drivers/ps3/ps3av.c
@@ -11,13 +11,14 @@
#include <linux/delay.h>
#include <linux/notifier.h>
#include <linux/ioctl.h>
-#include <linux/fb.h>
#include <linux/slab.h>
#include <asm/firmware.h>
#include <asm/ps3av.h>
#include <asm/ps3.h>
+#include <video/cmdline.h>
+
#include "vuart.h"
#define BUFSIZE 4096 /* vuart buf size */
@@ -921,6 +922,7 @@ EXPORT_SYMBOL_GPL(ps3av_audio_mute);
static int ps3av_probe(struct ps3_system_bus_device *dev)
{
+ const char *mode_option;
int res;
int id;
@@ -968,10 +970,9 @@ static int ps3av_probe(struct ps3_system_bus_device *dev)
ps3av_get_hw_conf(ps3av);
-#ifdef CONFIG_FB
- if (fb_mode_option && !strcmp(fb_mode_option, "safe"))
+ mode_option = video_get_options(NULL);
+ if (mode_option && !strcmp(mode_option, "safe"))
safe_mode = 1;
-#endif /* CONFIG_FB */
id = ps3av_auto_videomode(&ps3av->av_hw_conf);
if (id < 0) {
printk(KERN_ERR "%s: invalid id :%d\n", __func__, id);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 6d2fde6c5d11..bf05363d8906 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -11,6 +11,9 @@ config APERTURE_HELPERS
Support tracking and hand-over of aperture ownership. Required
by graphics drivers for firmware-provided framebuffers.
+config VIDEO_CMDLINE
+ bool
+
config VIDEO_NOMODESET
bool
default n
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index a50eb528ed3c..831c9fa57a6c 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_APERTURE_HELPERS) += aperture.o
obj-$(CONFIG_VGASTATE) += vgastate.o
+obj-$(CONFIG_VIDEO_CMDLINE) += cmdline.o
obj-$(CONFIG_VIDEO_NOMODESET) += nomodeset.o
obj-$(CONFIG_HDMI) += hdmi.o
diff --git a/drivers/video/cmdline.c b/drivers/video/cmdline.c
new file mode 100644
index 000000000000..d3d257489c3d
--- /dev/null
+++ b/drivers/video/cmdline.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Based on the fbdev code in drivers/video/fbdev/core/fb_cmdline:
+ *
+ * Copyright (C) 2014 Intel Corp
+ * Copyright (C) 1994 Martin Schaller
+ *
+ * 2001 - Documented with DocBook
+ * - Brad Douglas <brad@neruo.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Authors:
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#include <linux/fb.h> /* for FB_MAX */
+#include <linux/init.h>
+
+#include <video/cmdline.h>
+
+/*
+ * FB_MAX is the maximum number of framebuffer devices and also
+ * the maximum number of video= parameters. Although not directly
+ * related to each other, it makes sense to keep it that way.
+ */
+static const char *video_options[FB_MAX] __read_mostly;
+static const char *video_option __read_mostly;
+static int video_of_only __read_mostly;
+
+static const char *__video_get_option_string(const char *name)
+{
+ const char *options = NULL;
+ size_t name_len = 0;
+
+ if (name)
+ name_len = strlen(name);
+
+ if (name_len) {
+ unsigned int i;
+ const char *opt;
+
+ for (i = 0; i < ARRAY_SIZE(video_options); ++i) {
+ if (!video_options[i])
+ continue;
+ if (video_options[i][0] == '\0')
+ continue;
+ opt = video_options[i];
+ if (!strncmp(opt, name, name_len) && opt[name_len] == ':')
+ options = opt + name_len + 1;
+ }
+ }
+
+ /* No match, return global options */
+ if (!options)
+ options = video_option;
+
+ return options;
+}
+
+/**
+ * video_get_options - get kernel boot parameters
+ * @name: name of the output as it would appear in the boot parameter
+ * line (video=<name>:<options>)
+ *
+ * Looks up the video= options for the given name. Names are connector
+ * names with DRM, or driver names with fbdev. If no video option for
+ * the name has been specified, the function returns the global video=
+ * setting. A @name of NULL always returns the global video setting.
+ *
+ * Returns:
+ * The string of video options for the given name, or NULL if no video
+ * option has been specified.
+ */
+const char *video_get_options(const char *name)
+{
+ return __video_get_option_string(name);
+}
+EXPORT_SYMBOL(video_get_options);
+
+bool __video_get_options(const char *name, const char **options, bool is_of)
+{
+ bool enabled = true;
+ const char *opt = NULL;
+
+ if (video_of_only && !is_of)
+ enabled = false;
+
+ opt = __video_get_option_string(name);
+
+ if (options)
+ *options = opt;
+
+ return enabled;
+}
+EXPORT_SYMBOL(__video_get_options);
+
+/*
+ * Process command line options for video adapters. This function is
+ * a __setup and __init function. It only stores the options. Drivers
+ * have to call video_get_options() as necessary.
+ */
+static int __init video_setup(char *options)
+{
+ if (!options || !*options)
+ goto out;
+
+ if (!strncmp(options, "ofonly", 6)) {
+ video_of_only = true;
+ goto out;
+ }
+
+ if (strchr(options, ':')) {
+ /* named */
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(video_options); i++) {
+ if (!video_options[i]) {
+ video_options[i] = options;
+ break;
+ }
+ }
+ } else {
+ /* global */
+ video_option = options;
+ }
+
+out:
+ return 1;
+}
+__setup("video=", video_setup);
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index ff3646c30d0d..96e91570cdd3 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -3,16 +3,13 @@
# fbdev configuration
#
-config FB_CMDLINE
- bool
-
config FB_NOTIFY
bool
menuconfig FB
tristate "Support for frame buffer devices"
- select FB_CMDLINE
select FB_NOTIFY
+ select VIDEO_CMDLINE
help
The frame buffer device provides an abstraction for the graphics
hardware. It represents the frame buffer of some video hardware and
diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
index 26cbc965497c..08fabce76b74 100644
--- a/drivers/video/fbdev/core/Makefile
+++ b/drivers/video/fbdev/core/Makefile
@@ -1,9 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_FB_CMDLINE) += fb_cmdline.o
obj-$(CONFIG_FB_NOTIFY) += fb_notify.o
obj-$(CONFIG_FB) += fb.o
fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
- modedb.o fbcvt.o
+ modedb.o fbcvt.o fb_cmdline.o
fb-$(CONFIG_FB_DEFERRED_IO) += fb_defio.o
ifeq ($(CONFIG_FRAMEBUFFER_CONSOLE),y)
diff --git a/drivers/video/fbdev/core/fb_cmdline.c b/drivers/video/fbdev/core/fb_cmdline.c
index 3b5bd666b952..4d1634c492ec 100644
--- a/drivers/video/fbdev/core/fb_cmdline.c
+++ b/drivers/video/fbdev/core/fb_cmdline.c
@@ -12,16 +12,14 @@
* for more details.
*
* Authors:
- * Vetter <danie.vetter@ffwll.ch>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
*/
-#include <linux/init.h>
-#include <linux/fb.h>
-static char *video_options[FB_MAX] __read_mostly;
-static int ofonly __read_mostly;
+#include <linux/export.h>
+#include <linux/fb.h>
+#include <linux/string.h>
-const char *fb_mode_option;
-EXPORT_SYMBOL_GPL(fb_mode_option);
+#include <video/cmdline.h>
/**
* fb_get_options - get kernel boot parameters
@@ -30,78 +28,34 @@ EXPORT_SYMBOL_GPL(fb_mode_option);
* (video=<name>:<options>)
* @option: the option will be stored here
*
+ * The caller owns the string returned in @option and is
+ * responsible for releasing the memory.
+ *
* NOTE: Needed to maintain backwards compatibility
*/
int fb_get_options(const char *name, char **option)
{
- char *opt, *options = NULL;
- int retval = 0;
- int name_len = strlen(name), i;
-
- if (name_len && ofonly && strncmp(name, "offb", 4))
- retval = 1;
+ const char *options = NULL;
+ bool is_of = false;
+ bool enabled;
- if (name_len && !retval) {
- for (i = 0; i < FB_MAX; i++) {
- if (video_options[i] == NULL)
- continue;
- if (!video_options[i][0])
- continue;
- opt = video_options[i];
- if (!strncmp(name, opt, name_len) &&
- opt[name_len] == ':')
- options = opt + name_len + 1;
- }
- }
- /* No match, pass global option */
- if (!options && option && fb_mode_option)
- options = kstrdup(fb_mode_option, GFP_KERNEL);
- if (options && !strncmp(options, "off", 3))
- retval = 1;
-
- if (option)
- *option = options;
-
- return retval;
-}
-EXPORT_SYMBOL(fb_get_options);
+ if (name)
+	is_of = !strncmp(name, "offb", 4);
-/**
- * video_setup - process command line options
- * @options: string of options
- *
- * Process command line options for frame buffer subsystem.
- *
- * NOTE: This function is a __setup and __init function.
- * It only stores the options. Drivers have to call
- * fb_get_options() as necessary.
- */
-static int __init video_setup(char *options)
-{
- if (!options || !*options)
- goto out;
+ enabled = __video_get_options(name, &options, is_of);
- if (!strncmp(options, "ofonly", 6)) {
- ofonly = 1;
- goto out;
+ if (options) {
+ if (!strncmp(options, "off", 3))
+ enabled = false;
}
- if (strchr(options, ':')) {
- /* named */
- int i;
-
- for (i = 0; i < FB_MAX; i++) {
- if (video_options[i] == NULL) {
- video_options[i] = options;
- break;
- }
- }
- } else {
- /* global */
- fb_mode_option = options;
+ if (option) {
+ if (options)
+ *option = kstrdup(options, GFP_KERNEL);
+ else
+ *option = NULL;
}
-out:
- return 1;
+ return enabled ? 0 : 1; // 0 on success, 1 otherwise
}
-__setup("video=", video_setup);
+EXPORT_SYMBOL(fb_get_options);
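
Since the returned string is now a kstrdup() copy, legacy callers must free it; a sketch with a hypothetical "mydrvfb" driver name (the modedb.c hunk below follows the same pattern):

    char *option = NULL;

    if (fb_get_options("mydrvfb", &option))
            return -ENODEV;         /* disabled on the command line */
    /* ... parse option, if any ... */
    kfree(option);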
diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
index 6473e0dfe146..23cf8eba785d 100644
--- a/drivers/video/fbdev/core/modedb.c
+++ b/drivers/video/fbdev/core/modedb.c
@@ -620,6 +620,7 @@ int fb_find_mode(struct fb_var_screeninfo *var,
const struct fb_videomode *default_mode,
unsigned int default_bpp)
{
+ char *mode_option_buf = NULL;
int i;
/* Set up defaults */
@@ -635,8 +636,10 @@ int fb_find_mode(struct fb_var_screeninfo *var,
default_bpp = 8;
/* Did the user specify a video mode? */
- if (!mode_option)
- mode_option = fb_mode_option;
+ if (!mode_option) {
+ fb_get_options(NULL, &mode_option_buf);
+ mode_option = mode_option_buf;
+ }
if (mode_option) {
const char *name = mode_option;
unsigned int namelen = strlen(name);
@@ -715,6 +718,7 @@ int fb_find_mode(struct fb_var_screeninfo *var,
res_specified = 1;
}
done:
+ kfree(mode_option_buf);
if (cvt) {
struct fb_videomode cvt_mode;
int ret;
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 33f982cd1a27..536a0b0091c3 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -210,6 +210,32 @@ int drm_atomic_helper_page_flip_target(
plane)))
/**
+ * drm_atomic_plane_enabling - check whether a plane is being enabled
+ * @old_plane_state: old atomic plane state
+ * @new_plane_state: new atomic plane state
+ *
+ * Checks the atomic state of a plane to determine whether it's being enabled
+ * or not. This also WARNs if it detects an invalid state (both CRTC and FB
+ * need to either both be NULL or both be non-NULL).
+ *
+ * RETURNS:
+ * True if the plane is being enabled, false otherwise.
+ */
+static inline bool drm_atomic_plane_enabling(struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state)
+{
+ /*
+ * When enabling a plane, CRTC and FB should always be set together.
+ * Anything else should be considered a bug in the atomic core, so we
+ * gently warn about it.
+ */
+ WARN_ON((!new_plane_state->crtc && new_plane_state->fb) ||
+ (new_plane_state->crtc && !new_plane_state->fb));
+
+ return !old_plane_state->crtc && new_plane_state->crtc;
+}
+
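+ A sketch of the new helper in a hypothetical driver's atomic_update hook (all foo_* names are invented):
+
+     static void foo_plane_atomic_update(struct drm_plane *plane,
+                                         struct drm_atomic_state *state)
+     {
+             struct drm_plane_state *old_state =
+                     drm_atomic_get_old_plane_state(state, plane);
+             struct drm_plane_state *new_state =
+                     drm_atomic_get_new_plane_state(state, plane);
+
+             if (drm_atomic_plane_enabling(old_state, new_state))
+                     foo_hw_enable(plane);   /* hypothetical hardware poke */
+
+             /* ... program the new scanout buffer ... */
+     }
+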
+/**
* drm_atomic_plane_disabling - check whether a plane is being disabled
* @old_plane_state: old atomic plane state
* @new_plane_state: new atomic plane state
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
index 49649eb8447e..566497eeb3b8 100644
--- a/include/drm/drm_displayid.h
+++ b/include/drm/drm_displayid.h
@@ -139,7 +139,11 @@ struct displayid_vesa_vendor_specific_block {
u8 mso;
} __packed;
-/* DisplayID iteration */
+/*
+ * DisplayID iteration.
+ *
+ * Do not access directly, this is private.
+ */
struct displayid_iter {
const struct drm_edid *drm_edid;
@@ -147,6 +151,9 @@ struct displayid_iter {
int length;
int idx;
int ext_index;
+
+ u8 version;
+ u8 primary_use;
};
void displayid_iter_edid_begin(const struct drm_edid *drm_edid,
@@ -157,4 +164,7 @@ __displayid_iter_next(struct displayid_iter *iter);
while (((__block) = __displayid_iter_next(__iter)))
void displayid_iter_end(struct displayid_iter *iter);
+u8 displayid_version(const struct displayid_iter *iter);
+u8 displayid_primary_use(const struct displayid_iter *iter);
+
#endif
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 1d76d0686b03..5b86bb7603e7 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -400,25 +400,6 @@ struct drm_driver {
int (*dumb_map_offset)(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
- /**
- * @dumb_destroy:
- *
- * This destroys the userspace handle for the given dumb backing storage buffer.
- * Since buffer objects must be reference counted in the kernel a buffer object
- * won't be immediately freed if a framebuffer modeset object still uses it.
- *
- * Called by the user via ioctl.
- *
- * The default implementation is drm_gem_dumb_destroy(). GEM based drivers
- * must not overwrite this.
- *
- * Returns:
- *
- * Zero on success, negative errno on failure.
- */
- int (*dumb_destroy)(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
/** @major: driver major number */
int major;
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 70ae6c290bdc..571885d32907 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -61,9 +61,15 @@ struct std_timing {
u8 vfreq_aspect;
} __attribute__((packed));
-#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
-#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
-#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3)
+#define DRM_EDID_PT_SYNC_MASK (3 << 3)
+# define DRM_EDID_PT_ANALOG_CSYNC (0 << 3)
+# define DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC (1 << 3)
+# define DRM_EDID_PT_DIGITAL_CSYNC (2 << 3)
+# define DRM_EDID_PT_CSYNC_ON_RGB (1 << 1) /* analog csync only */
+# define DRM_EDID_PT_CSYNC_SERRATE (1 << 2)
+# define DRM_EDID_PT_DIGITAL_SEPARATE_SYNC (3 << 3)
+# define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) /* also digital csync */
+# define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
#define DRM_EDID_PT_STEREO (1 << 5)
#define DRM_EDID_PT_INTERLACED (1 << 7)
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 772a4adf5287..c76e651f2d44 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -165,6 +165,16 @@ struct drm_gem_object_funcs {
int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
/**
+ * @evict:
+ *
+	 * Evicts the GEM object from memory. Used by the drm_gem_evict()
+ * helper. Returns 0 on success, -errno otherwise.
+ *
+ * This callback is optional.
+ */
+ int (*evict)(struct drm_gem_object *obj);
+
+ /**
* @vm_ops:
*
* Virtual memory operations used with mmap.
@@ -479,4 +489,6 @@ void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan,
bool (*shrink)(struct drm_gem_object *obj));
+int drm_gem_evict(struct drm_gem_object *obj);
+
#endif /* __DRM_GEM_H__ */
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index a2201b2488c5..5994fed5e327 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -61,20 +61,6 @@ struct drm_gem_shmem_object {
struct list_head madv_list;
/**
- * @pages_mark_dirty_on_put:
- *
- * Mark pages as dirty when they are put.
- */
- unsigned int pages_mark_dirty_on_put : 1;
-
- /**
- * @pages_mark_accessed_on_put:
- *
- * Mark pages as accessed when they are put.
- */
- unsigned int pages_mark_accessed_on_put : 1;
-
- /**
* @sgt: Scatter/gather table for imported PRIME buffers
*/
struct sg_table *sgt;
@@ -98,9 +84,23 @@ struct drm_gem_shmem_object {
unsigned int vmap_use_count;
/**
+ * @pages_mark_dirty_on_put:
+ *
+ * Mark pages as dirty when they are put.
+ */
+ bool pages_mark_dirty_on_put : 1;
+
+ /**
+ * @pages_mark_accessed_on_put:
+ *
+ * Mark pages as accessed when they are put.
+ */
+ bool pages_mark_accessed_on_put : 1;
+
+ /**
* @map_wc: map object write-combined (instead of using shmem defaults).
*/
- bool map_wc;
+ bool map_wc : 1;
};
#define to_drm_gem_shmem_obj(obj) \
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 206f495bbf06..965faf082a6d 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1331,6 +1331,32 @@ struct drm_plane_helper_funcs {
*/
void (*atomic_update)(struct drm_plane *plane,
struct drm_atomic_state *state);
+
+ /**
+ * @atomic_enable:
+ *
+ * Drivers should use this function to unconditionally enable a plane.
+ * This hook is called in-between the &drm_crtc_helper_funcs.atomic_begin
+ * and drm_crtc_helper_funcs.atomic_flush callbacks. It is called after
+ * @atomic_update, which will be called for all enabled planes. Drivers
+ * that use @atomic_enable should set up a plane in @atomic_update and
+ * afterwards enable the plane in @atomic_enable. If a plane needs to be
+ * enabled before installing the scanout buffer, drivers can still do
+ * so in @atomic_update.
+ *
+ * Note that the power state of the display pipe when this function is
+ * called depends upon the exact helpers and calling sequence the driver
+ * has picked. See drm_atomic_helper_commit_planes() for a discussion of
+ * the tradeoffs and variants of plane commit helpers.
+ *
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional. If implemented, @atomic_enable should be the inverse of
+ * @atomic_disable. Drivers that don't want to use either can still
+ * implement the complete plane update in @atomic_update.
+ */
+ void (*atomic_enable)(struct drm_plane *plane,
+ struct drm_atomic_state *state);
+
/**
* @atomic_disable:
*
@@ -1351,7 +1377,8 @@ struct drm_plane_helper_funcs {
* the tradeoffs and variants of plane commit helpers.
*
* This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * transitional plane helpers, but it is optional. It's intended to
+ * reverse the effects of @atomic_enable.
*/
void (*atomic_disable)(struct drm_plane *plane,
struct drm_atomic_state *state);
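
Wired into a plane helper vtable, the pairing described above might look like this sketch (all foo_* callbacks are hypothetical):

    static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
            .atomic_check = foo_plane_atomic_check,
            .atomic_update = foo_plane_atomic_update,       /* set up the plane */
            .atomic_enable = foo_plane_atomic_enable,       /* then enable it */
            .atomic_disable = foo_plane_atomic_disable,     /* inverse of enable */
    };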
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 10ab58c40746..082a6e980d01 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -15,6 +15,8 @@ struct drm_encoder;
struct drm_panel;
struct drm_bridge;
struct device_node;
+struct mipi_dsi_device_info;
+struct mipi_dsi_host;
/**
* enum drm_lvds_dual_link_pixels - Pixel order of an LVDS dual-link connection
@@ -129,6 +131,16 @@ drm_of_get_data_lanes_count_ep(const struct device_node *port,
}
#endif
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_MIPI_DSI)
+struct mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev);
+#else
+static inline struct mipi_dsi_host *drm_of_get_dsi_bus(struct device *dev)
+{
+ return ERR_PTR(-EINVAL);
+}
+#endif /* CONFIG_OF && CONFIG_DRM_MIPI_DSI */
+
/*
* drm_of_panel_bridge_remove - remove panel bridge
* @np: device tree node containing panel bridge output ports
diff --git a/include/drm/drm_suballoc.h b/include/drm/drm_suballoc.h
new file mode 100644
index 000000000000..c2188bb0b157
--- /dev/null
+++ b/include/drm/drm_suballoc.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2011 Red Hat Inc.
+ * Copyright © 2022 Intel Corporation
+ */
+#ifndef _DRM_SUBALLOC_H_
+#define _DRM_SUBALLOC_H_
+
+#include <drm/drm_mm.h>
+
+#include <linux/dma-fence.h>
+#include <linux/types.h>
+
+#define DRM_SUBALLOC_MAX_QUEUES 32
+/**
+ * struct drm_suballoc_manager - fenced range allocations
+ * @wq: Wait queue for sleeping allocations on contention.
+ * @hole: Pointer to first hole node.
+ * @olist: List of allocated ranges.
+ * @flist: Array[fence context hash] of queues of fenced allocated ranges.
+ * @size: Size of the managed range.
+ * @align: Default alignment for the managed range.
+ */
+struct drm_suballoc_manager {
+ wait_queue_head_t wq;
+ struct list_head *hole;
+ struct list_head olist;
+ struct list_head flist[DRM_SUBALLOC_MAX_QUEUES];
+ size_t size;
+ size_t align;
+};
+
+/**
+ * struct drm_suballoc - Sub-allocated range
+ * @olist: List link for list of allocated ranges.
+ * @flist: List link for the manager's queues of fenced allocated ranges.
+ * @manager: The drm_suballoc_manager.
+ * @soffset: Start offset.
+ * @eoffset: End offset + 1 so that @eoffset - @soffset = size.
+ * @fence: The fence protecting the allocation.
+ */
+struct drm_suballoc {
+ struct list_head olist;
+ struct list_head flist;
+ struct drm_suballoc_manager *manager;
+ size_t soffset;
+ size_t eoffset;
+ struct dma_fence *fence;
+};
+
+void drm_suballoc_manager_init(struct drm_suballoc_manager *sa_manager,
+ size_t size, size_t align);
+
+void drm_suballoc_manager_fini(struct drm_suballoc_manager *sa_manager);
+
+struct drm_suballoc *
+drm_suballoc_new(struct drm_suballoc_manager *sa_manager, size_t size,
+ gfp_t gfp, bool intr, size_t align);
+
+void drm_suballoc_free(struct drm_suballoc *sa, struct dma_fence *fence);
+
+/**
+ * drm_suballoc_soffset - Range start.
+ * @sa: The struct drm_suballoc.
+ *
+ * Return: The start of the allocated range.
+ */
+static inline size_t drm_suballoc_soffset(struct drm_suballoc *sa)
+{
+ return sa->soffset;
+}
+
+/**
+ * drm_suballoc_eoffset - Range end.
+ * @sa: The struct drm_suballoc.
+ *
+ * Return: The end of the allocated range + 1.
+ */
+static inline size_t drm_suballoc_eoffset(struct drm_suballoc *sa)
+{
+ return sa->eoffset;
+}
+
+/**
+ * drm_suballoc_size - Range size.
+ * @sa: The struct drm_suballoc.
+ *
+ * Return: The size of the allocated range.
+ */
+static inline size_t drm_suballoc_size(struct drm_suballoc *sa)
+{
+ return sa->eoffset - sa->soffset;
+}
+
+#ifdef CONFIG_DEBUG_FS
+void drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager,
+ struct drm_printer *p,
+ unsigned long long suballoc_base);
+#else
+static inline void
+drm_suballoc_dump_debug_info(struct drm_suballoc_manager *sa_manager,
+ struct drm_printer *p,
+ unsigned long long suballoc_base)
+{ }
+
+#endif
+
+#endif /* _DRM_SUBALLOC_H_ */
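
A hedged lifecycle sketch based only on the prototypes above, assuming drm_suballoc_new() returns an ERR_PTR() on failure and that a fence from the consuming GPU work is available:

    struct drm_suballoc_manager mgr;
    struct drm_suballoc *sa;

    drm_suballoc_manager_init(&mgr, SZ_16K, 256);

    /* Interruptible, kernel-context allocation; 0 selects the default align. */
    sa = drm_suballoc_new(&mgr, 1024, GFP_KERNEL, true, 0);
    if (IS_ERR(sa))
            return PTR_ERR(sa);

    /* ... submit GPU work using drm_suballoc_soffset(sa) ... */

    drm_suballoc_free(sa, fence);   /* range is reusable once the fence signals */
    drm_suballoc_manager_fini(&mgr);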
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 9db9e5e504ee..898608f87b96 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -48,6 +48,8 @@ struct drm_gem_object;
struct drm_gpu_scheduler;
struct drm_sched_rq;
+struct drm_file;
+
/* These are often used as an (initial) index
* to an array, and as such should start at 0.
*/
@@ -522,6 +524,10 @@ int drm_sched_job_init(struct drm_sched_job *job,
void drm_sched_job_arm(struct drm_sched_job *job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
struct dma_fence *fence);
+int drm_sched_job_add_syncobj_dependency(struct drm_sched_job *job,
+ struct drm_file *file,
+ u32 handle,
+ u32 point);
int drm_sched_job_add_resv_dependencies(struct drm_sched_job *job,
struct dma_resv *resv,
enum dma_resv_usage usage);
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index 4f3e81eac6f3..56e82ba2d046 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -141,7 +141,7 @@ struct ttm_device_funcs {
* the graphics address space
* @ctx: context for this move with parameters
* @new_mem: the new memory region receiving the buffer
- @ @hop: placement for driver directed intermediate hop
+ * @hop: placement for driver directed intermediate hop
*
* Move a buffer between two memory regions.
* Returns errno -EMULTIHOP if driver requests a hop
diff --git a/include/linux/fb.h b/include/linux/fb.h
index d8d20514ea05..d96529caa35e 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -765,7 +765,6 @@ struct dmt_videomode {
const struct fb_videomode *mode;
};
-extern const char *fb_mode_option;
extern const struct fb_videomode vesa_modes[];
extern const struct dmt_videomode dmt_modes[];
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 642808520d92..a87bbbbca2d4 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -972,6 +972,19 @@ extern "C" {
#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl)
+/**
+ * DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
+ *
+ * GEM handles are not reference-counted by the kernel. User-space is
+ * responsible for managing their lifetime. For example, if user-space imports
+ * the same memory object twice on the same DRM file description, the same GEM
+ * handle is returned by both imports, and user-space needs to ensure
+ * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
+ * when a memory object is allocated, then exported and imported again on the
+ * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
+ * and always returns new GEM handles, even if an existing GEM handle already
+ * refers to the same memory object before the IOCTL is performed.
+ */
#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
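
A minimal user-space sketch of the handle lifetime rule documented above, using the uapi header directly; "fd" is an open DRM device file descriptor:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	static int gem_close(int fd, uint32_t handle)
	{
		struct drm_gem_close args;

		memset(&args, 0, sizeof(args));
		args.handle = handle;
		/* Must be performed exactly once per handle, see above. */
		return ioctl(fd, DRM_IOCTL_GEM_CLOSE, &args);
	}
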
@@ -1012,7 +1025,37 @@ extern "C" {
#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
+/**
+ * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
+ *
+ * User-space sets &drm_prime_handle.handle with the GEM handle to export and
+ * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
+ * &drm_prime_handle.fd.
+ *
+ * The export can fail for any driver-specific reason, e.g. because export is
+ * not supported for this specific GEM handle (but might be for others).
+ *
+ * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
+ */
#define DRM_IOCTL_PRIME_HANDLE_TO_FD DRM_IOWR(0x2d, struct drm_prime_handle)
+/**
+ * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
+ *
+ * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
+ * import, and gets back a GEM handle in &drm_prime_handle.handle.
+ * &drm_prime_handle.flags is unused.
+ *
+ * If an existing GEM handle refers to the memory object backing the DMA-BUF,
+ * that GEM handle is returned. Therefore user-space which needs to handle
+ * arbitrary DMA-BUFs must maintain its own lookup data structure to manually
+ * reference-count duplicated GEM handles. For more information see
+ * &DRM_IOCTL_GEM_CLOSE.
+ *
+ * The import can fail for any driver-specific reason, e.g. because import is
+ * only supported for DMA-BUFs allocated on this DRM device.
+ *
+ * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
+ */
#define DRM_IOCTL_PRIME_FD_TO_HANDLE DRM_IOWR(0x2e, struct drm_prime_handle)
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
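
A user-space sketch of the PRIME round trip documented above, with minimal error handling; "fd" is an open DRM device file descriptor:

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	static int prime_export(int fd, uint32_t handle, int *dmabuf_fd)
	{
		struct drm_prime_handle args;

		memset(&args, 0, sizeof(args));
		args.handle = handle;
		args.flags = DRM_CLOEXEC | DRM_RDWR;
		if (ioctl(fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
			return -1;
		*dmabuf_fd = args.fd;
		return 0;
	}

	static int prime_import(int fd, int dmabuf_fd, uint32_t *handle)
	{
		struct drm_prime_handle args;

		memset(&args, 0, sizeof(args));
		args.fd = dmabuf_fd;
		/* Returns an existing handle if one already refers to this object. */
		if (ioctl(fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &args))
			return -1;
		*handle = args.handle;
		return 0;
	}
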
@@ -1104,8 +1147,13 @@ extern "C" {
* struct as the output.
*
* If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
- * will be filled with GEM buffer handles. Planes are valid until one has a
- * zero handle -- this can be used to compute the number of planes.
+ * will be filled with GEM buffer handles. New GEM handles are always
+ * returned, even if another GEM handle referring to the same memory object
+ * already exists on the DRM file description. The caller is responsible for
+ * closing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
+ * new handle will be returned for multiple planes if they use the same
+ * memory object. Planes are valid until one has a zero handle -- this can be
+ * used to compute the number of planes.
*
* Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
* until one has a zero &drm_mode_fb_cmd2.pitches.
@@ -1113,6 +1161,11 @@ extern "C" {
* If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
* in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
* modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
+ *
+ * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
+ * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
+ * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure not to
+ * double-close handles which appear multiple times in the array.
*/
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
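
Putting the pieces together, a user-space sketch of the recommended GETFB2 pattern, reusing the hypothetical prime_export() and gem_close() helpers from the sketches above; error paths are simplified:

	static int fb_to_dmabufs(int fd, uint32_t fb_id, int fds[4])
	{
		struct drm_mode_fb_cmd2 fb;
		int i, j;

		memset(&fb, 0, sizeof(fb));
		fb.fb_id = fb_id;
		if (ioctl(fd, DRM_IOCTL_MODE_GETFB2, &fb))
			return -1;

		/* Export each plane's handle to a DMA-BUF fd. */
		for (i = 0; i < 4 && fb.handles[i]; i++)
			if (prime_export(fd, fb.handles[i], &fds[i]))
				return -1;

		/* Close each unique handle exactly once. */
		for (i = 0; i < 4 && fb.handles[i]; i++) {
			for (j = 0; j < i; j++)
				if (fb.handles[j] == fb.handles[i])
					break;
			if (j == i)
				gem_close(fd, fb.handles[i]);
		}
		return 0;
	}
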
diff --git a/include/video/cmdline.h b/include/video/cmdline.h
new file mode 100644
index 000000000000..26b80cdaef79
--- /dev/null
+++ b/include/video/cmdline.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef VIDEO_CMDLINE_H
+#define VIDEO_CMDLINE_H
+
+#include <linux/types.h>
+
+#if defined(CONFIG_VIDEO_CMDLINE)
+const char *video_get_options(const char *name);
+
+/* exported for compatibility with fbdev; don't use in new code */
+bool __video_get_options(const char *name, const char **option, bool is_of);
+#else
+static inline const char *video_get_options(const char *name)
+{
+ return NULL;
+}
+#endif
+
+#endif
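
A hypothetical in-kernel caller of the new helper, looking up the video= option for a named output; the connector name is illustrative:

	#include <linux/printk.h>
	#include <video/cmdline.h>

	static void example_check_cmdline(void)
	{
		/* e.g. video=HDMI-A-1:1920x1080@60 on the kernel command line */
		const char *opt = video_get_options("HDMI-A-1");

		if (opt)
			pr_info("mode option for HDMI-A-1: %s\n", opt);
	}
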