author     Dave Airlie <airlied@redhat.com>   2023-09-29 08:27:00 +1000
committer  Dave Airlie <airlied@redhat.com>   2023-09-29 08:27:15 +1000
commit     79fb229b8810071648b65c37382aea7819a5f935 (patch)
tree       bf201cd7732ad48ad98a963344cb535b95653804 /drivers
parent     f107ff76a8c242b298413ef52db9978dc3fe0153 (diff)
parent     78f54469b871db5ba8ea49abd4e5994e97bd525b (diff)
Merge tag 'drm-misc-next-2023-09-27' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for v6.7-rc1:

UAPI Changes:
- drm_file owner is now updated during use, so for a drm fd opened by the display server on behalf of a client, the correct owner is displayed.
- Qaic gains support for the QAIC_DETACH_SLICE_BO ioctl to allow bo recycling.

Cross-subsystem Changes:
- Disable boot logo for au1200fb and mmpfb, and unexport the logo helpers. Only fbcon should manage display of the logo.
- Update freescale in MAINTAINERS.
- Add some bridge files to bridge in MAINTAINERS.
- Update gma500 driver repo in MAINTAINERS to point to drm-misc.

Core Changes:
- Move size computations to the drm buddy allocator.
- Make drm_atomic_helper_shutdown(NULL) a nop.
- Assorted small fixes in drm_debugfs and DP-MST payload addition error handling.
- Fix DRM_BRIDGE_ATTACH_NO_CONNECTOR handling.
- Handle bad (h/v)sync_end in EDID by clipping to htotal.
- Build GPUVM as a module.

Driver Changes:
- Simple drivers don't need to cache the prepared result.
- Call drm_atomic_helper_shutdown() in shutdown/unbind for a whole lot more drm drivers.
- Assorted small fixes in amdgpu, ssd130x, bridge/it66121, accel/qaic, nouveau, tc358768.
- Add NV12 for komeda writeback.
- Add arbitration lost event to synopsys/dw-hdmi-cec.
- Speed up suspend/resume in nouveau by not restoring some big bo's.
- Assorted nouveau display rework in preparation for GSP-RM, especially around how the modeset sequence works and the DP sequence in relation to link training.
- Add anx7816 support to the analogix anx78xx bridge.
- Support NVSYNC and NHSYNC in tegra.
- Allow multiple power domains in the simple driver.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/f1fae5eb-25b8-192a-9a53-215e1184ce81@linux.intel.com
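For the QAIC uapi bullet above, a minimal userspace sketch of recycling a BO with the new ioctl follows. The DRM_IOCTL_QAIC_DETACH_SLICE_BO wrapper name and the <drm/qaic_accel.h> include path are assumptions; the {handle, pad} layout, the zero-pad check, and the -EBUSY behaviour come from the qaic_detach_slice_bo_ioctl() handler in the diff below.

/*
 * Hypothetical sketch only: detach the slicing configuration from a
 * previously attached BO so it can be re-sliced and reused.  The ioctl
 * wrapper name and header path are assumed, not taken from this patch.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/qaic_accel.h>	/* uapi header; install path may differ */

static int qaic_detach_slices(int accel_fd, unsigned int bo_handle)
{
	struct qaic_detach_slice req = {
		.handle = bo_handle,	/* GEM handle of the sliced BO */
		.pad = 0,		/* must be zero, handler returns -EINVAL otherwise */
	};

	/* EBUSY means the BO is still queued for DMA; retry later. */
	if (ioctl(accel_fd, DRM_IOCTL_QAIC_DETACH_SLICE_BO, &req) < 0) {
		fprintf(stderr, "detach slice: %s\n", strerror(errno));
		return -errno;
	}
	return 0;
}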
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/accel/qaic/qaic.h | 13
-rw-r--r--  drivers/accel/qaic/qaic_data.c | 187
-rw-r--r--  drivers/accel/qaic/qaic_drv.c | 119
-rw-r--r--  drivers/gpu/drm/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 58
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 6
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c | 2
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_drv.c | 9
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 7
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_kms.h | 1
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 6
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 6
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 8
-rw-r--r--  drivers/gpu/drm/aspeed/aspeed_gfx_drv.c | 7
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 6
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/ite-it66121.c | 4
-rw-r--r--  drivers/gpu/drm/bridge/panel.c | 17
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/tc358768.c | 383
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c | 23
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 3
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 3
-rw-r--r--  drivers/gpu/drm/drm_buddy.c | 138
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 26
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 18
-rw-r--r--  drivers/gpu/drm/drm_file.c | 40
-rw-r--r--  drivers/gpu/drm/drm_gpuvm.c (renamed from drivers/gpu/drm/drm_gpuva_mgr.c) | 407
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 3
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 8
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 9
-rw-r--r--  drivers/gpu/drm/hyperv/hyperv_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 23
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-drv.c | 8
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-kms.c | 7
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-kms.h | 1
-rw-r--r--  drivers/gpu/drm/imx/ipuv3/imx-drm-core.c | 8
-rw-r--r--  drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 66
-rw-r--r--  drivers/gpu/drm/logicvc/logicvc_drm.c | 9
-rw-r--r--  drivers/gpu/drm/loongson/lsdc_drv.c | 6
-rw-r--r--  drivers/gpu/drm/mcde/mcde_drv.c | 9
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 511
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/head.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/headc57d.c | 14
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/conn.h | 20
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/if0011.h | 21
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/if0012.h | 249
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvif/outp.h | 96
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/memory.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 90
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 251
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 345
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_exec.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_uvmm.c | 42
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_uvmm.h | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/client.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/conn.c | 36
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/disp.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvif/outp.c | 412
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/firmware.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/core/memory.c | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c | 146
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c | 362
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h | 15
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 182
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c | 144
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c | 67
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c | 460
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 8
-rw-r--r--  drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c | 9
-rw-r--r--  drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c | 9
-rw-r--r--  drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c | 9
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt35950.c | 9
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt36523.c | 12
-rw-r--r--  drivers/gpu/drm/panel/panel-orisetech-otm8009a.c | 17
-rw-r--r--  drivers/gpu/drm/panel/panel-raydium-rm68200.c | 38
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-s6e63m0.c | 25
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c | 10
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-sofef00.c | 9
-rw-r--r--  drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c | 10
-rw-r--r--  drivers/gpu/drm/panel/panel-sony-td4353-jdi.c | 9
-rw-r--r--  drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c | 18
-rw-r--r--  drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c | 11
-rw-r--r--  drivers/gpu/drm/panel/panel-truly-nt35597.c | 20
-rw-r--r--  drivers/gpu/drm/panel/panel-visionox-r66451.c | 16
-rw-r--r--  drivers/gpu/drm/panel/panel-visionox-rm69299.c | 8
-rw-r--r--  drivers/gpu/drm/panel/panel-visionox-vtdr6130.c | 9
-rw-r--r--  drivers/gpu/drm/pl111/pl111_drv.c | 7
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 7
-rw-r--r--  drivers/gpu/drm/solomon/ssd130x.c | 194
-rw-r--r--  drivers/gpu/drm/sti/sti_drv.c | 7
-rw-r--r--  drivers/gpu/drm/stm/drv.c | 7
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 6
-rw-r--r--  drivers/gpu/drm/tegra/rgb.c | 16
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 11
-rw-r--r--  drivers/gpu/drm/tiny/bochs.c | 6
-rw-r--r--  drivers/gpu/drm/tiny/cirrus.c | 6
-rw-r--r--  drivers/gpu/drm/tiny/simpledrm.c | 105
-rw-r--r--  drivers/gpu/drm/tve200/tve200_drv.c | 7
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_drv.c | 10
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c | 36
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 6
-rw-r--r--  drivers/video/fbdev/au1200fb.c | 9
-rw-r--r--  drivers/video/fbdev/core/Makefile | 2
-rw-r--r--  drivers/video/fbdev/core/fb_internal.h | 17
-rw-r--r--  drivers/video/fbdev/core/fb_logo.c | 508
-rw-r--r--  drivers/video/fbdev/core/fbcon.c | 2
-rw-r--r--  drivers/video/fbdev/core/fbmem.c | 542
-rw-r--r--  drivers/video/fbdev/mmp/fb/mmpfb.c | 7
147 files changed, 4521 insertions, 2700 deletions
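One of the core changes above, "Move size computations to the drm buddy allocator", lets callers request a physically contiguous run directly with DRM_BUDDY_CONTIGUOUS_ALLOCATION instead of rounding up and trimming in the driver (the logic removed from amdgpu_vram_mgr.c further down). A minimal kernel-side sketch follows, assuming the drm_buddy API in this tree; the helper name is invented for illustration.

#include <drm/drm_buddy.h>

/* Sketch: allocate @size bytes as one contiguous run of buddy blocks. */
static int example_alloc_contiguous(struct drm_buddy *mm, u64 start, u64 end,
				    u64 size, struct list_head *blocks)
{
	unsigned long flags = DRM_BUDDY_CONTIGUOUS_ALLOCATION;

	/* Restrict the search window only when a real range was requested. */
	if (start || end != mm->size)
		flags |= DRM_BUDDY_RANGE_ALLOCATION;

	/*
	 * With the contiguous flag, the power-of-two round-up and the trim of
	 * the tail block are handled inside drm_buddy_alloc_blocks(), which is
	 * why the driver-side trim code disappears in this series.
	 */
	return drm_buddy_alloc_blocks(mm, start, end, size,
				      mm->chunk_size, blocks, flags);
}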
diff --git a/drivers/accel/qaic/qaic.h b/drivers/accel/qaic/qaic.h
index f2bd637a0d4e..e3f4c30f3ffd 100644
--- a/drivers/accel/qaic/qaic.h
+++ b/drivers/accel/qaic/qaic.h
@@ -27,6 +27,9 @@
#define QAIC_DBC_OFF(i) ((i) * QAIC_DBC_SIZE + QAIC_DBC_BASE)
#define to_qaic_bo(obj) container_of(obj, struct qaic_bo, base)
+#define to_qaic_drm_device(dev) container_of(dev, struct qaic_drm_device, drm)
+#define to_drm(qddev) (&(qddev)->drm)
+#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
extern bool datapath_polling;
@@ -137,6 +140,8 @@ struct qaic_device {
};
struct qaic_drm_device {
+ /* The drm device struct of this drm device */
+ struct drm_device drm;
/* Pointer to the root device struct driven by this driver */
struct qaic_device *qdev;
/*
@@ -146,8 +151,6 @@ struct qaic_drm_device {
* device is the actual physical device
*/
s32 partition_id;
- /* Pointer to the drm device struct of this drm device */
- struct drm_device *ddev;
/* Head in list of users who have opened this drm device */
struct list_head users;
/* Synchronizes access to users list */
@@ -158,8 +161,6 @@ struct qaic_bo {
struct drm_gem_object base;
/* Scatter/gather table for allocate/imported BO */
struct sg_table *sgt;
- /* BO size requested by user. GEM object might be bigger in size. */
- u64 size;
/* Head in list of slices of this BO */
struct list_head slices;
/* Total nents, for all slices of this BO */
@@ -221,7 +222,8 @@ struct qaic_bo {
*/
u32 queue_level_before;
} perf_stats;
-
+ /* Synchronizes BO operations */
+ struct mutex lock;
};
struct bo_slice {
@@ -277,6 +279,7 @@ int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *f
int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
void irq_polling_work(struct work_struct *work);
#endif /* _QAIC_H_ */
diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
index f4b06792c6f1..4a8e43a7a6a4 100644
--- a/drivers/accel/qaic/qaic_data.c
+++ b/drivers/accel/qaic/qaic_data.c
@@ -154,6 +154,7 @@ static void free_slice(struct kref *kref)
{
struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count);
+ slice->bo->total_slice_nents -= slice->nents;
list_del(&slice->slice);
drm_gem_object_put(&slice->bo->base);
sg_free_table(slice->sgt);
@@ -579,7 +580,7 @@ static void qaic_gem_print_info(struct drm_printer *p, unsigned int indent,
{
struct qaic_bo *bo = to_qaic_bo(obj);
- drm_printf_indent(p, indent, "user requested size=%llu\n", bo->size);
+ drm_printf_indent(p, indent, "BO DMA direction %d\n", bo->dir);
}
static const struct vm_operations_struct drm_vm_ops = {
@@ -623,6 +624,7 @@ static void qaic_free_object(struct drm_gem_object *obj)
qaic_free_sgt(bo->sgt);
}
+ mutex_destroy(&bo->lock);
drm_gem_object_release(obj);
kfree(bo);
}
@@ -634,6 +636,19 @@ static const struct drm_gem_object_funcs qaic_gem_funcs = {
.vm_ops = &drm_vm_ops,
};
+static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
+{
+ if (reinit) {
+ bo->sliced = false;
+ reinit_completion(&bo->xfer_done);
+ } else {
+ mutex_init(&bo->lock);
+ init_completion(&bo->xfer_done);
+ }
+ complete_all(&bo->xfer_done);
+ INIT_LIST_HEAD(&bo->slices);
+}
+
static struct qaic_bo *qaic_alloc_init_bo(void)
{
struct qaic_bo *bo;
@@ -642,9 +657,7 @@ static struct qaic_bo *qaic_alloc_init_bo(void)
if (!bo)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&bo->slices);
- init_completion(&bo->xfer_done);
- complete_all(&bo->xfer_done);
+ qaic_init_bo(bo, false);
return bo;
}
@@ -695,8 +708,6 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
if (ret)
goto free_bo;
- bo->size = args->size;
-
ret = drm_gem_handle_create(file_priv, obj, &args->handle);
if (ret)
goto free_sgt;
@@ -828,7 +839,6 @@ static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_h
}
bo->sgt = sgt;
- bo->size = hdr->size;
return 0;
}
@@ -838,7 +848,7 @@ static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
{
int ret;
- if (bo->size != hdr->size)
+ if (bo->base.size < hdr->size)
return -EINVAL;
ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
@@ -857,9 +867,9 @@ static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
ret = qaic_prepare_import_bo(bo, hdr);
else
ret = qaic_prepare_export_bo(qdev, bo, hdr);
-
- if (ret == 0)
- bo->dir = hdr->dir;
+ bo->dir = hdr->dir;
+ bo->dbc = &qdev->dbc[hdr->dbc_id];
+ bo->nr_slice = hdr->count;
return ret;
}
@@ -868,7 +878,6 @@ static void qaic_unprepare_import_bo(struct qaic_bo *bo)
{
dma_buf_unmap_attachment(bo->base.import_attach, bo->sgt, bo->dir);
bo->sgt = NULL;
- bo->size = 0;
}
static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo)
@@ -884,6 +893,8 @@ static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
qaic_unprepare_export_bo(qdev, bo);
bo->dir = 0;
+ bo->dbc = NULL;
+ bo->nr_slice = 0;
}
static void qaic_free_slices_bo(struct qaic_bo *bo)
@@ -892,6 +903,9 @@ static void qaic_free_slices_bo(struct qaic_bo *bo)
list_for_each_entry_safe(slice, temp, &bo->slices, slice)
kref_put(&slice->ref_count, free_slice);
+ if (WARN_ON_ONCE(bo->total_slice_nents != 0))
+ bo->total_slice_nents = 0;
+ bo->nr_slice = 0;
}
static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
@@ -908,15 +922,11 @@ static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
}
}
- if (bo->total_slice_nents > qdev->dbc[hdr->dbc_id].nelem) {
+ if (bo->total_slice_nents > bo->dbc->nelem) {
qaic_free_slices_bo(bo);
return -ENOSPC;
}
- bo->sliced = true;
- bo->nr_slice = hdr->count;
- list_add_tail(&bo->bo_list, &qdev->dbc[hdr->dbc_id].bo_lists);
-
return 0;
}
@@ -994,10 +1004,13 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
}
bo = to_qaic_bo(obj);
+ ret = mutex_lock_interruptible(&bo->lock);
+ if (ret)
+ goto put_bo;
if (bo->sliced) {
ret = -EINVAL;
- goto put_bo;
+ goto unlock_bo;
}
dbc = &qdev->dbc[args->hdr.dbc_id];
@@ -1018,9 +1031,10 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
if (args->hdr.dir == DMA_TO_DEVICE)
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir);
- bo->dbc = dbc;
+ bo->sliced = true;
+ list_add_tail(&bo->bo_list, &bo->dbc->bo_lists);
srcu_read_unlock(&dbc->ch_lock, rcu_id);
- drm_gem_object_put(obj);
+ mutex_unlock(&bo->lock);
kfree(slice_ent);
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
@@ -1031,6 +1045,8 @@ unprepare_bo:
qaic_unprepare_bo(qdev, bo);
unlock_ch_srcu:
srcu_read_unlock(&dbc->ch_lock, rcu_id);
+unlock_bo:
+ mutex_unlock(&bo->lock);
put_bo:
drm_gem_object_put(obj);
free_slice_ent:
@@ -1185,15 +1201,18 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
}
bo = to_qaic_bo(obj);
+ ret = mutex_lock_interruptible(&bo->lock);
+ if (ret)
+ goto failed_to_send_bo;
if (!bo->sliced) {
ret = -EINVAL;
- goto failed_to_send_bo;
+ goto unlock_bo;
}
- if (is_partial && pexec[i].resize > bo->size) {
+ if (is_partial && pexec[i].resize > bo->base.size) {
ret = -EINVAL;
- goto failed_to_send_bo;
+ goto unlock_bo;
}
spin_lock_irqsave(&dbc->xfer_lock, flags);
@@ -1202,7 +1221,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
if (queued) {
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
ret = -EINVAL;
- goto failed_to_send_bo;
+ goto unlock_bo;
}
bo->req_id = dbc->next_req_id++;
@@ -1233,17 +1252,20 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
if (ret) {
bo->queued = false;
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
- goto failed_to_send_bo;
+ goto unlock_bo;
}
}
reinit_completion(&bo->xfer_done);
list_add_tail(&bo->xfer_list, &dbc->xfer_list);
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
dma_sync_sgtable_for_device(&qdev->pdev->dev, bo->sgt, bo->dir);
+ mutex_unlock(&bo->lock);
}
return 0;
+unlock_bo:
+ mutex_unlock(&bo->lock);
failed_to_send_bo:
if (likely(obj))
drm_gem_object_put(obj);
@@ -1799,6 +1821,91 @@ unlock_usr_srcu:
return ret;
}
+static void detach_slice_bo(struct qaic_device *qdev, struct qaic_bo *bo)
+{
+ qaic_free_slices_bo(bo);
+ qaic_unprepare_bo(qdev, bo);
+ qaic_init_bo(bo, true);
+ list_del(&bo->bo_list);
+ drm_gem_object_put(&bo->base);
+}
+
+int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct qaic_detach_slice *args = data;
+ int rcu_id, usr_rcu_id, qdev_rcu_id;
+ struct dma_bridge_chan *dbc;
+ struct drm_gem_object *obj;
+ struct qaic_device *qdev;
+ struct qaic_user *usr;
+ unsigned long flags;
+ struct qaic_bo *bo;
+ int ret;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ usr = file_priv->driver_priv;
+ usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
+ if (!usr->qddev) {
+ ret = -ENODEV;
+ goto unlock_usr_srcu;
+ }
+
+ qdev = usr->qddev->qdev;
+ qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
+ if (qdev->in_reset) {
+ ret = -ENODEV;
+ goto unlock_dev_srcu;
+ }
+
+ obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto unlock_dev_srcu;
+ }
+
+ bo = to_qaic_bo(obj);
+ ret = mutex_lock_interruptible(&bo->lock);
+ if (ret)
+ goto put_bo;
+
+ if (!bo->sliced) {
+ ret = -EINVAL;
+ goto unlock_bo;
+ }
+
+ dbc = bo->dbc;
+ rcu_id = srcu_read_lock(&dbc->ch_lock);
+ if (dbc->usr != usr) {
+ ret = -EINVAL;
+ goto unlock_ch_srcu;
+ }
+
+ /* Check if BO is committed to H/W for DMA */
+ spin_lock_irqsave(&dbc->xfer_lock, flags);
+ if (bo->queued) {
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+ ret = -EBUSY;
+ goto unlock_ch_srcu;
+ }
+ spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+
+ detach_slice_bo(qdev, bo);
+
+unlock_ch_srcu:
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+unlock_bo:
+ mutex_unlock(&bo->lock);
+put_bo:
+ drm_gem_object_put(obj);
+unlock_dev_srcu:
+ srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+unlock_usr_srcu:
+ srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+ return ret;
+}
+
static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
{
unsigned long flags;
@@ -1810,6 +1917,12 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db
bo->queued = false;
list_del(&bo->xfer_list);
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
+ bo->nr_slice_xfer_done = 0;
+ bo->req_id = 0;
+ bo->perf_stats.req_received_ts = 0;
+ bo->perf_stats.req_submit_ts = 0;
+ bo->perf_stats.req_processed_ts = 0;
+ bo->perf_stats.queue_level_before = 0;
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
complete_all(&bo->xfer_done);
drm_gem_object_put(&bo->base);
@@ -1857,7 +1970,6 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
void release_dbc(struct qaic_device *qdev, u32 dbc_id)
{
- struct bo_slice *slice, *slice_temp;
struct qaic_bo *bo, *bo_temp;
struct dma_bridge_chan *dbc;
@@ -1875,24 +1987,11 @@ void release_dbc(struct qaic_device *qdev, u32 dbc_id)
dbc->usr = NULL;
list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) {
- list_for_each_entry_safe(slice, slice_temp, &bo->slices, slice)
- kref_put(&slice->ref_count, free_slice);
- bo->sliced = false;
- INIT_LIST_HEAD(&bo->slices);
- bo->total_slice_nents = 0;
- bo->dir = 0;
- bo->dbc = NULL;
- bo->nr_slice = 0;
- bo->nr_slice_xfer_done = 0;
- bo->queued = false;
- bo->req_id = 0;
- init_completion(&bo->xfer_done);
- complete_all(&bo->xfer_done);
- list_del(&bo->bo_list);
- bo->perf_stats.req_received_ts = 0;
- bo->perf_stats.req_submit_ts = 0;
- bo->perf_stats.req_processed_ts = 0;
- bo->perf_stats.queue_level_before = 0;
+ drm_gem_object_get(&bo->base);
+ mutex_lock(&bo->lock);
+ detach_slice_bo(qdev, bo);
+ mutex_unlock(&bo->lock);
+ drm_gem_object_put(&bo->base);
}
dbc->in_use = false;
diff --git a/drivers/accel/qaic/qaic_drv.c b/drivers/accel/qaic/qaic_drv.c
index b5de82e6eb4d..6f58095767df 100644
--- a/drivers/accel/qaic/qaic_drv.c
+++ b/drivers/accel/qaic/qaic_drv.c
@@ -22,6 +22,7 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <uapi/drm/qaic_accel.h>
#include "mhi_controller.h"
@@ -55,7 +56,7 @@ static void free_usr(struct kref *kref)
static int qaic_open(struct drm_device *dev, struct drm_file *file)
{
- struct qaic_drm_device *qddev = dev->dev_private;
+ struct qaic_drm_device *qddev = to_qaic_drm_device(dev);
struct qaic_device *qdev = qddev->qdev;
struct qaic_user *usr;
int rcu_id;
@@ -150,6 +151,7 @@ static const struct drm_ioctl_desc qaic_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(QAIC_PARTIAL_EXECUTE_BO, qaic_partial_execute_bo_ioctl, 0),
DRM_IOCTL_DEF_DRV(QAIC_WAIT_BO, qaic_wait_bo_ioctl, 0),
DRM_IOCTL_DEF_DRV(QAIC_PERF_STATS_BO, qaic_perf_stats_bo_ioctl, 0),
+ DRM_IOCTL_DEF_DRV(QAIC_DETACH_SLICE_BO, qaic_detach_slice_bo_ioctl, 0),
};
static const struct drm_driver qaic_accel_driver = {
@@ -170,64 +172,39 @@ static const struct drm_driver qaic_accel_driver = {
static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
{
- struct qaic_drm_device *qddev;
- struct drm_device *ddev;
- struct device *pdev;
+ struct qaic_drm_device *qddev = qdev->qddev;
+ struct drm_device *drm = to_drm(qddev);
int ret;
/* Hold off implementing partitions until the uapi is determined */
if (partition_id != QAIC_NO_PARTITION)
return -EINVAL;
- pdev = &qdev->pdev->dev;
-
- qddev = kzalloc(sizeof(*qddev), GFP_KERNEL);
- if (!qddev)
- return -ENOMEM;
-
- ddev = drm_dev_alloc(&qaic_accel_driver, pdev);
- if (IS_ERR(ddev)) {
- ret = PTR_ERR(ddev);
- goto ddev_fail;
- }
-
- ddev->dev_private = qddev;
- qddev->ddev = ddev;
-
- qddev->qdev = qdev;
qddev->partition_id = partition_id;
- INIT_LIST_HEAD(&qddev->users);
- mutex_init(&qddev->users_mutex);
-
- qdev->qddev = qddev;
-
- ret = drm_dev_register(ddev, 0);
- if (ret) {
- pci_dbg(qdev->pdev, "%s: drm_dev_register failed %d\n", __func__, ret);
- goto drm_reg_fail;
- }
- return 0;
+ /*
+ * drm_dev_unregister() sets the driver data to NULL and
+ * drm_dev_register() does not update the driver data. During a SOC
+ * reset drm dev is unregistered and registered again leaving the
+ * driver data to NULL.
+ */
+ dev_set_drvdata(to_accel_kdev(qddev), drm->accel);
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);
-drm_reg_fail:
- mutex_destroy(&qddev->users_mutex);
- qdev->qddev = NULL;
- drm_dev_put(ddev);
-ddev_fail:
- kfree(qddev);
return ret;
}
static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
{
- struct qaic_drm_device *qddev;
+ struct qaic_drm_device *qddev = qdev->qddev;
+ struct drm_device *drm = to_drm(qddev);
struct qaic_user *usr;
- qddev = qdev->qddev;
- qdev->qddev = NULL;
- if (!qddev)
- return;
-
+ drm_dev_get(drm);
+ drm_dev_unregister(drm);
+ qddev->partition_id = 0;
/*
* Existing users get unresolvable errors till they close FDs.
* Need to sync carefully with users calling close(). The
@@ -254,13 +231,7 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
mutex_lock(&qddev->users_mutex);
}
mutex_unlock(&qddev->users_mutex);
-
- if (qddev->ddev) {
- drm_dev_unregister(qddev->ddev);
- drm_dev_put(qddev->ddev);
- }
-
- kfree(qddev);
+ drm_dev_put(drm);
}
static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
@@ -344,8 +315,20 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset)
qdev->in_reset = false;
}
+static void cleanup_qdev(struct qaic_device *qdev)
+{
+ int i;
+
+ for (i = 0; i < qdev->num_dbc; ++i)
+ cleanup_srcu_struct(&qdev->dbc[i].ch_lock);
+ cleanup_srcu_struct(&qdev->dev_lock);
+ pci_set_drvdata(qdev->pdev, NULL);
+ destroy_workqueue(qdev->cntl_wq);
+}
+
static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct qaic_drm_device *qddev;
struct qaic_device *qdev;
int i;
@@ -381,18 +364,18 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
}
- return qdev;
-}
+ qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
+ if (IS_ERR(qddev)) {
+ cleanup_qdev(qdev);
+ return NULL;
+ }
-static void cleanup_qdev(struct qaic_device *qdev)
-{
- int i;
+ drmm_mutex_init(to_drm(qddev), &qddev->users_mutex);
+ INIT_LIST_HEAD(&qddev->users);
+ qddev->qdev = qdev;
+ qdev->qddev = qddev;
- for (i = 0; i < qdev->num_dbc; ++i)
- cleanup_srcu_struct(&qdev->dbc[i].ch_lock);
- cleanup_srcu_struct(&qdev->dev_lock);
- pci_set_drvdata(qdev->pdev, NULL);
- destroy_workqueue(qdev->cntl_wq);
+ return qdev;
}
static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
@@ -591,22 +574,22 @@ static int __init qaic_init(void)
{
int ret;
- ret = mhi_driver_register(&qaic_mhi_driver);
+ ret = pci_register_driver(&qaic_pci_driver);
if (ret) {
- pr_debug("qaic: mhi_driver_register failed %d\n", ret);
+ pr_debug("qaic: pci_register_driver failed %d\n", ret);
return ret;
}
- ret = pci_register_driver(&qaic_pci_driver);
+ ret = mhi_driver_register(&qaic_mhi_driver);
if (ret) {
- pr_debug("qaic: pci_register_driver failed %d\n", ret);
- goto free_mhi;
+ pr_debug("qaic: mhi_driver_register failed %d\n", ret);
+ goto free_pci;
}
return 0;
-free_mhi:
- mhi_driver_unregister(&qaic_mhi_driver);
+free_pci:
+ pci_unregister_driver(&qaic_pci_driver);
return ret;
}
@@ -628,8 +611,8 @@ static void __exit qaic_exit(void)
* reinitializing the link_up state after the cleanup is done.
*/
link_up = true;
- pci_unregister_driver(&qaic_pci_driver);
mhi_driver_unregister(&qaic_mhi_driver);
+ pci_unregister_driver(&qaic_pci_driver);
}
module_init(qaic_init);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index ab9ef1c20349..48ca28a2e4ff 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -216,6 +216,13 @@ config DRM_EXEC
help
Execution context for command submissions
+config DRM_GPUVM
+ tristate
+ depends on DRM
+ help
+ GPU-VM representation providing helpers to manage a GPUs virtual
+ address space
+
config DRM_BUDDY
tristate
depends on DRM
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 215e78e79125..8e1bde059170 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -45,7 +45,6 @@ drm-y := \
drm_vblank.o \
drm_vblank_work.o \
drm_vma_manager.o \
- drm_gpuva_mgr.o \
drm_writeback.o
drm-$(CONFIG_DRM_LEGACY) += \
drm_agpsupport.o \
@@ -81,6 +80,7 @@ obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
#
#
obj-$(CONFIG_DRM_EXEC) += drm_exec.o
+obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index ca4d2d430e28..a1b15d0d6c48 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -962,6 +962,7 @@ static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
list_for_each_entry(file, &dev->filelist, lhead) {
struct task_struct *task;
struct drm_gem_object *gobj;
+ struct pid *pid;
int id;
/*
@@ -971,8 +972,9 @@ static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
* Therefore, we need to protect this ->comm access using RCU.
*/
rcu_read_lock();
- task = pid_task(file->pid, PIDTYPE_TGID);
- seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
+ pid = rcu_dereference(file->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+ seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
task ? task->comm : "<unknown>");
rcu_read_unlock();
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index c7085a747b03..18f58efc9dc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -424,9 +424,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
const struct ttm_place *place,
struct ttm_resource **res)
{
- u64 vis_usage = 0, max_bytes, cur_size, min_block_size;
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_device *adev = to_amdgpu_device(mgr);
+ u64 vis_usage = 0, max_bytes, min_block_size;
struct amdgpu_vram_mgr_resource *vres;
u64 size, remaining_size, lpfn, fpfn;
struct drm_buddy *mm = &mgr->mm;
@@ -474,6 +474,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
if (place->flags & TTM_PL_FLAG_TOPDOWN)
vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
+ vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+
if (fpfn || lpfn != mgr->mm.size)
/* Allocate blocks in desired range */
vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
@@ -496,25 +499,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
!(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
min_block_size = (u64)pages_per_block << PAGE_SHIFT;
- cur_size = size;
-
- if (fpfn + size != (u64)place->lpfn << PAGE_SHIFT) {
- /*
- * Except for actual range allocation, modify the size and
- * min_block_size conforming to continuous flag enablement
- */
- if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
- size = roundup_pow_of_two(size);
- min_block_size = size;
- /*
- * Modify the size value if size is not
- * aligned with min_block_size
- */
- } else if (!IS_ALIGNED(size, min_block_size)) {
- size = round_up(size, min_block_size);
- }
- }
-
r = drm_buddy_alloc_blocks(mm, fpfn,
lpfn,
size,
@@ -531,40 +515,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
}
mutex_unlock(&mgr->lock);
- if (cur_size != size) {
- struct drm_buddy_block *block;
- struct list_head *trim_list;
- u64 original_size;
- LIST_HEAD(temp);
-
- trim_list = &vres->blocks;
- original_size = (u64)vres->base.size;
-
- /*
- * If size value is rounded up to min_block_size, trim the last
- * block to the required size
- */
- if (!list_is_singular(&vres->blocks)) {
- block = list_last_entry(&vres->blocks, typeof(*block), link);
- list_move_tail(&block->link, &temp);
- trim_list = &temp;
- /*
- * Compute the original_size value by subtracting the
- * last block size with (aligned size - original size)
- */
- original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size);
- }
-
- mutex_lock(&mgr->lock);
- drm_buddy_block_trim(mm,
- original_size,
- trim_list);
- mutex_unlock(&mgr->lock);
-
- if (!list_empty(&temp))
- list_splice_tail(trim_list, &vres->blocks);
- }
-
vres->base.start = 0;
list_for_each_entry(block, &vres->blocks, link) {
unsigned long start;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index cbef4ff28cd8..baf7e5254fb3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -343,7 +343,7 @@ bool dm_helpers_dp_mst_send_payload_allocation(
struct amdgpu_dm_connector *aconnector;
struct drm_dp_mst_topology_state *mst_state;
struct drm_dp_mst_topology_mgr *mst_mgr;
- struct drm_dp_mst_atomic_payload *new_payload, *old_payload;
+ struct drm_dp_mst_atomic_payload *new_payload, old_payload;
enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
int ret = 0;
@@ -367,8 +367,8 @@ bool dm_helpers_dp_mst_send_payload_allocation(
ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload);
} else {
dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
- new_payload, old_payload);
- drm_dp_remove_payload_part2(mst_mgr, mst_state, old_payload, new_payload);
+ new_payload, &old_payload);
+ drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload);
}
if (ret) {
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index 6c56f5662bc7..80973975bfdb 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -521,7 +521,7 @@ static struct komeda_format_caps d71_format_caps_table[] = {
{__HW_ID(5, 1), DRM_FORMAT_YUYV, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, /* afbc */
{__HW_ID(5, 2), DRM_FORMAT_YUYV, RICH, Flip_H_V, 0, 0},
{__HW_ID(5, 3), DRM_FORMAT_UYVY, RICH, Flip_H_V, 0, 0},
- {__HW_ID(5, 6), DRM_FORMAT_NV12, RICH, Flip_H_V, 0, 0},
+ {__HW_ID(5, 6), DRM_FORMAT_NV12, RICH_WB, Flip_H_V, 0, 0},
{__HW_ID(5, 6), DRM_FORMAT_YUV420_8BIT, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, /* afbc */
{__HW_ID(5, 7), DRM_FORMAT_YUV420, RICH, Flip_H_V, 0, 0},
/* YUV 10bit*/
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index cb2a2be24c5f..cc57ea4e13ae 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -45,6 +45,14 @@ static void komeda_platform_remove(struct platform_device *pdev)
devm_kfree(dev, mdrv);
}
+static void komeda_platform_shutdown(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+
+ komeda_kms_shutdown(mdrv->kms);
+}
+
static int komeda_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -142,6 +150,7 @@ static const struct dev_pm_ops komeda_pm_ops = {
static struct platform_driver komeda_platform_driver = {
.probe = komeda_platform_probe,
.remove_new = komeda_platform_remove,
+ .shutdown = komeda_platform_shutdown,
.driver = {
.name = "komeda",
.of_match_table = komeda_of_match,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 9299026701f3..fe46b0ebefea 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -340,3 +340,10 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
komeda_kms_cleanup_private_objs(kms);
drm->dev_private = NULL;
}
+
+void komeda_kms_shutdown(struct komeda_kms_dev *kms)
+{
+ struct drm_device *drm = &kms->base;
+
+ drm_atomic_helper_shutdown(drm);
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 6ef655326357..a4048724564d 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -190,5 +190,6 @@ void komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev);
void komeda_kms_detach(struct komeda_kms_dev *kms);
+void komeda_kms_shutdown(struct komeda_kms_dev *kms);
#endif /*_KOMEDA_KMS_H_*/
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index aa06f9838015..32be9e370049 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -372,6 +372,11 @@ static void hdlcd_remove(struct platform_device *pdev)
component_master_del(&pdev->dev, &hdlcd_master_ops);
}
+static void hdlcd_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
static const struct of_device_id hdlcd_of_match[] = {
{ .compatible = "arm,hdlcd" },
{},
@@ -399,6 +404,7 @@ static SIMPLE_DEV_PM_OPS(hdlcd_pm_ops, hdlcd_pm_suspend, hdlcd_pm_resume);
static struct platform_driver hdlcd_platform_driver = {
.probe = hdlcd_probe,
.remove_new = hdlcd_remove,
+ .shutdown = hdlcd_shutdown,
.driver = {
.name = "hdlcd",
.pm = &hdlcd_pm_ops,
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 62329d5dd992..6682131d2910 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -941,6 +941,11 @@ static void malidp_platform_remove(struct platform_device *pdev)
component_master_del(&pdev->dev, &malidp_master_ops);
}
+static void malidp_platform_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
static int __maybe_unused malidp_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
@@ -982,6 +987,7 @@ static const struct dev_pm_ops malidp_pm_ops = {
static struct platform_driver malidp_platform_driver = {
.probe = malidp_platform_probe,
.remove_new = malidp_platform_remove,
+ .shutdown = malidp_platform_shutdown,
.driver = {
.name = "mali-dp",
.pm = &malidp_pm_ops,
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index e8d2fe955909..fa1c67598706 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -148,6 +148,7 @@ static int armada_drm_bind(struct device *dev)
err_kms:
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
+ dev_set_drvdata(dev, NULL);
return ret;
}
@@ -166,6 +167,7 @@ static void armada_drm_unbind(struct device *dev)
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
+ dev_set_drvdata(dev, NULL);
}
static void armada_add_endpoints(struct device *dev,
@@ -230,6 +232,11 @@ static int armada_drm_remove(struct platform_device *pdev)
return 0;
}
+static void armada_drm_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
static const struct platform_device_id armada_drm_platform_ids[] = {
{
.name = "armada-drm",
@@ -243,6 +250,7 @@ MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
static struct platform_driver armada_drm_platform_driver = {
.probe = armada_drm_probe,
.remove = armada_drm_remove,
+ .shutdown = armada_drm_shutdown,
.driver = {
.name = "armada-drm",
},
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index d207b03f8357..78122b35a0cb 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -358,11 +358,18 @@ static void aspeed_gfx_remove(struct platform_device *pdev)
sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
drm_dev_unregister(drm);
aspeed_gfx_unload(drm);
+ drm_atomic_helper_shutdown(drm);
+}
+
+static void aspeed_gfx_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
}
static struct platform_driver aspeed_gfx_platform_driver = {
.probe = aspeed_gfx_probe,
.remove_new = aspeed_gfx_remove,
+ .shutdown = aspeed_gfx_shutdown,
.driver = {
.name = "aspeed_gfx",
.of_match_table = aspeed_gfx_match,
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index e1224ef4ad83..cf5b754f044c 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -125,6 +125,11 @@ static void ast_pci_remove(struct pci_dev *pdev)
drm_atomic_helper_shutdown(dev);
}
+static void ast_pci_shutdown(struct pci_dev *pdev)
+{
+ drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
+}
+
static int ast_drm_freeze(struct drm_device *dev)
{
int error;
@@ -209,6 +214,7 @@ static struct pci_driver ast_pci_driver = {
.id_table = ast_pciidlist,
.probe = ast_pci_probe,
.remove = ast_pci_remove,
+ .shutdown = ast_pci_shutdown,
.driver.pm = &ast_pm_ops,
};
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index fa0f9a93d50d..84c54e8622d1 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -782,6 +782,11 @@ static void atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
drm_dev_put(ddev);
}
+static void atmel_hlcdc_dc_drm_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
@@ -825,6 +830,7 @@ static const struct of_device_id atmel_hlcdc_dc_of_match[] = {
static struct platform_driver atmel_hlcdc_dc_platform_driver = {
.probe = atmel_hlcdc_dc_drm_probe,
.remove_new = atmel_hlcdc_dc_drm_remove,
+ .shutdown = atmel_hlcdc_dc_drm_shutdown,
.driver = {
.name = "atmel-hlcdc-display-controller",
.pm = pm_sleep_ptr(&atmel_hlcdc_dc_drm_pm_ops),
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index ad8241758896..5748a8581af4 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -1211,6 +1211,7 @@ static const u16 anx78xx_chipid_list[] = {
0x7808,
0x7812,
0x7814,
+ 0x7816,
0x7818,
};
@@ -1369,6 +1370,7 @@ static const struct of_device_id anx78xx_match_table[] = {
{ .compatible = "analogix,anx7808", .data = anx7808_i2c_addresses },
{ .compatible = "analogix,anx7812", .data = anx781x_i2c_addresses },
{ .compatible = "analogix,anx7814", .data = anx781x_i2c_addresses },
+ { .compatible = "analogix,anx7816", .data = anx781x_i2c_addresses },
{ .compatible = "analogix,anx7818", .data = anx781x_i2c_addresses },
{ /* sentinel */ },
};
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 51abe42c639e..8f740154707d 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1464,6 +1464,9 @@ static int _anx7625_hpd_polling(struct anx7625_data *ctx,
if (ctx->pdata.intp_irq)
return 0;
+ /* Delay 200ms for FW HPD de-bounce */
+ msleep(200);
+
ret = readx_poll_timeout(anx7625_read_hpd_status_p0,
ctx, val,
((val & HPD_STATUS) || (val < 0)),
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 3c9b42c9d2ee..1cf3fb1f13dc 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -884,14 +884,14 @@ static struct edid *it66121_bridge_get_edid(struct drm_bridge *bridge,
mutex_lock(&ctx->lock);
ret = it66121_preamble_ddc(ctx);
if (ret) {
- edid = ERR_PTR(ret);
+ edid = NULL;
goto out_unlock;
}
ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
IT66121_DDC_HEADER_EDID);
if (ret) {
- edid = ERR_PTR(ret);
+ edid = NULL;
goto out_unlock;
}
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index e00d2e94c751..e48823a4f1ed 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -67,14 +67,6 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
struct drm_device *drm_dev = bridge->dev;
int ret;
- if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
- return 0;
-
- if (!bridge->encoder) {
- DRM_ERROR("Missing encoder\n");
- return -ENODEV;
- }
-
panel_bridge->link = device_link_add(drm_dev->dev, panel->dev,
DL_FLAG_STATELESS);
if (!panel_bridge->link) {
@@ -83,6 +75,15 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Missing encoder\n");
+ device_link_del(panel_bridge->link);
+ return -ENODEV;
+ }
+
drm_connector_helper_add(connector,
&panel_bridge_connector_helper_funcs);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
index be21c11de1f2..673661160e54 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
@@ -145,6 +145,10 @@ static irqreturn_t dw_hdmi_cec_hardirq(int irq, void *data)
cec->tx_status = CEC_TX_STATUS_NACK;
cec->tx_done = true;
ret = IRQ_WAKE_THREAD;
+ } else if (stat & CEC_STAT_ARBLOST) {
+ cec->tx_status = CEC_TX_STATUS_ARB_LOST;
+ cec->tx_done = true;
+ ret = IRQ_WAKE_THREAD;
}
if (stat & CEC_STAT_EOM) {
@@ -209,7 +213,7 @@ static int dw_hdmi_cec_enable(struct cec_adapter *adap, bool enable)
cec->ops->enable(cec->hdmi);
irqs = CEC_STAT_ERROR_INIT | CEC_STAT_NACK | CEC_STAT_EOM |
- CEC_STAT_DONE;
+ CEC_STAT_ARBLOST | CEC_STAT_DONE;
dw_hdmi_write(cec, irqs, HDMI_CEC_POLARITY);
dw_hdmi_write(cec, ~irqs, HDMI_CEC_MASK);
dw_hdmi_write(cec, ~irqs, HDMI_IH_MUTE_CEC_STAT0);
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index 819a4b6ec2a0..0e8813278a2f 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -9,12 +9,14 @@
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
+#include <linux/math64.h>
#include <linux/media-bus-format.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/units.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
@@ -156,6 +158,7 @@ struct tc358768_priv {
u32 frs; /* PLL Freqency range for HSCK (post divider) */
u32 dsiclk; /* pll_clk / 2 */
+ u32 pclk; /* incoming pclk rate */
};
static inline struct tc358768_priv *dsi_host_to_tc358768(struct mipi_dsi_host
@@ -216,6 +219,10 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
u32 tmp, orig;
tc358768_read(priv, reg, &orig);
+
+ if (priv->error)
+ return;
+
tmp = orig & ~mask;
tmp |= val & mask;
if (tmp != orig)
@@ -312,7 +319,7 @@ static int tc358768_calc_pll(struct tc358768_priv *priv,
target_pll = tc358768_pclk_to_pll(priv, mode->clock * 1000);
- /* pll_clk = RefClk * [(FBD + 1)/ (PRD + 1)] * [1 / (2^FRS)] */
+ /* pll_clk = RefClk * FBD / PRD * (1 / (2^FRS)) */
for (i = 0; i < ARRAY_SIZE(frs_limits); i++)
if (target_pll >= frs_limits[i])
@@ -332,19 +339,19 @@ static int tc358768_calc_pll(struct tc358768_priv *priv,
best_prd = 0;
best_fbd = 0;
- for (prd = 0; prd < 16; ++prd) {
- u32 divisor = (prd + 1) * (1 << frs);
+ for (prd = 1; prd <= 16; ++prd) {
+ u32 divisor = prd * (1 << frs);
u32 fbd;
- for (fbd = 0; fbd < 512; ++fbd) {
+ for (fbd = 1; fbd <= 512; ++fbd) {
u32 pll, diff, pll_in;
- pll = (u32)div_u64((u64)refclk * (fbd + 1), divisor);
+ pll = (u32)div_u64((u64)refclk * fbd, divisor);
if (pll >= max_pll || pll < min_pll)
continue;
- pll_in = (u32)div_u64((u64)refclk, prd + 1);
+ pll_in = (u32)div_u64((u64)refclk, prd);
if (pll_in < 4000000)
continue;
@@ -375,6 +382,7 @@ found:
priv->prd = best_prd;
priv->frs = frs;
priv->dsiclk = best_pll / 2;
+ priv->pclk = mode->clock * 1000;
return 0;
}
@@ -600,14 +608,14 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
dev_dbg(priv->dev, "PLL: refclk %lu, fbd %u, prd %u, frs %u\n",
clk_get_rate(priv->refclk), fbd, prd, frs);
- dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, DSIByteClk %u\n",
+ dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, HSByteClk %u\n",
priv->dsiclk * 2, priv->dsiclk, priv->dsiclk / 4);
dev_dbg(priv->dev, "PLL: pclk %u (panel: %u)\n",
tc358768_pll_to_pclk(priv, priv->dsiclk * 2),
mode->clock * 1000);
/* PRD[15:12] FBD[8:0] */
- tc358768_write(priv, TC358768_PLLCTL0, (prd << 12) | fbd);
+ tc358768_write(priv, TC358768_PLLCTL0, ((prd - 1) << 12) | (fbd - 1));
/* FRS[11:10] LBWS[9:8] CKEN[4] RESETB[1] EN[0] */
tc358768_write(priv, TC358768_PLLCTL1,
@@ -623,15 +631,36 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
return tc358768_clear_error(priv);
}
-#define TC358768_PRECISION 1000
-static u32 tc358768_ns_to_cnt(u32 ns, u32 period_nsk)
+static u32 tc358768_ns_to_cnt(u32 ns, u32 period_ps)
+{
+ return DIV_ROUND_UP(ns * 1000, period_ps);
+}
+
+static u32 tc358768_ps_to_ns(u32 ps)
{
- return (ns * TC358768_PRECISION + period_nsk) / period_nsk;
+ return ps / 1000;
+}
+
+static u32 tc358768_dpi_to_ns(u32 val, u32 pclk)
+{
+ return (u32)div_u64((u64)val * NANO, pclk);
+}
+
+/* Convert value in DPI pixel clock units to DSI byte count */
+static u32 tc358768_dpi_to_dsi_bytes(struct tc358768_priv *priv, u32 val)
+{
+ u64 m = (u64)val * priv->dsiclk / 4 * priv->dsi_lanes;
+ u64 n = priv->pclk;
+
+ return (u32)div_u64(m + n - 1, n);
}
-static u32 tc358768_to_ns(u32 nsk)
+static u32 tc358768_dsi_bytes_to_ns(struct tc358768_priv *priv, u32 val)
{
- return (nsk / TC358768_PRECISION);
+ u64 m = (u64)val * NANO;
+ u64 n = priv->dsiclk / 4 * priv->dsi_lanes;
+
+ return (u32)div_u64(m, n);
}
static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
@@ -642,13 +671,23 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
u32 val, val2, lptxcnt, hact, data_type;
s32 raw_val;
const struct drm_display_mode *mode;
- u32 dsibclk_nsk, dsiclk_nsk, ui_nsk;
- u32 dsiclk, dsibclk, video_start;
- const u32 internal_delay = 40;
+ u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
+ u32 dsiclk, hsbyteclk;
int ret, i;
+ struct videomode vm;
+ struct device *dev = priv->dev;
+ /* In pixelclock units */
+ u32 dpi_htot, dpi_data_start;
+ /* In byte units */
+ u32 dsi_dpi_htot, dsi_dpi_data_start;
+ u32 dsi_hsw, dsi_hbp, dsi_hact, dsi_hfp;
+ const u32 dsi_hss = 4; /* HSS is a short packet (4 bytes) */
+ /* In hsbyteclk units */
+ u32 dsi_vsdly;
+ const u32 internal_dly = 40;
if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
- dev_warn_once(priv->dev, "Non-continuous mode unimplemented, falling back to continuous\n");
+ dev_warn_once(dev, "Non-continuous mode unimplemented, falling back to continuous\n");
mode_flags &= ~MIPI_DSI_CLOCK_NON_CONTINUOUS;
}
@@ -656,7 +695,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
ret = tc358768_sw_reset(priv);
if (ret) {
- dev_err(priv->dev, "Software reset failed: %d\n", ret);
+ dev_err(dev, "Software reset failed: %d\n", ret);
tc358768_hw_disable(priv);
return;
}
@@ -664,53 +703,194 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
mode = &bridge->encoder->crtc->state->adjusted_mode;
ret = tc358768_setup_pll(priv, mode);
if (ret) {
- dev_err(priv->dev, "PLL setup failed: %d\n", ret);
+ dev_err(dev, "PLL setup failed: %d\n", ret);
tc358768_hw_disable(priv);
return;
}
+ drm_display_mode_to_videomode(mode, &vm);
+
dsiclk = priv->dsiclk;
- dsibclk = dsiclk / 4;
+ hsbyteclk = dsiclk / 4;
/* Data Format Control Register */
val = BIT(2) | BIT(1) | BIT(0); /* rdswap_en | dsitx_en | txdt_en */
switch (dsi_dev->format) {
case MIPI_DSI_FMT_RGB888:
val |= (0x3 << 4);
- hact = mode->hdisplay * 3;
- video_start = (mode->htotal - mode->hsync_start) * 3;
+ hact = vm.hactive * 3;
data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
break;
case MIPI_DSI_FMT_RGB666:
val |= (0x4 << 4);
- hact = mode->hdisplay * 3;
- video_start = (mode->htotal - mode->hsync_start) * 3;
+ hact = vm.hactive * 3;
data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
val |= (0x4 << 4) | BIT(3);
- hact = mode->hdisplay * 18 / 8;
- video_start = (mode->htotal - mode->hsync_start) * 18 / 8;
+ hact = vm.hactive * 18 / 8;
data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
break;
case MIPI_DSI_FMT_RGB565:
val |= (0x5 << 4);
- hact = mode->hdisplay * 2;
- video_start = (mode->htotal - mode->hsync_start) * 2;
+ hact = vm.hactive * 2;
data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
break;
default:
- dev_err(priv->dev, "Invalid data format (%u)\n",
+ dev_err(dev, "Invalid data format (%u)\n",
dsi_dev->format);
tc358768_hw_disable(priv);
return;
}
+ /*
+ * There are three important things to make TC358768 work correctly,
+ * which are not trivial to manage:
+ *
+ * 1. Keep the DPI line-time and the DSI line-time as close to each
+ * other as possible.
+ * 2. TC358768 goes to LP mode after each line's active area. The DSI
+ * HFP period has to be long enough for entering and exiting LP mode.
+ * But it is not clear how to calculate this.
+ * 3. VSDly (video start delay) has to be long enough to ensure that the
+ * DSI TX does not start transmitting until we have started receiving
+ * pixel data from the DPI input. It is not clear how to calculate
+ * this either.
+ */
+
+ dpi_htot = vm.hactive + vm.hfront_porch + vm.hsync_len + vm.hback_porch;
+ dpi_data_start = vm.hsync_len + vm.hback_porch;
+
+ dev_dbg(dev, "dpi horiz timing (pclk): %u + %u + %u + %u = %u\n",
+ vm.hsync_len, vm.hback_porch, vm.hactive, vm.hfront_porch,
+ dpi_htot);
+
+ dev_dbg(dev, "dpi horiz timing (ns): %u + %u + %u + %u = %u\n",
+ tc358768_dpi_to_ns(vm.hsync_len, vm.pixelclock),
+ tc358768_dpi_to_ns(vm.hback_porch, vm.pixelclock),
+ tc358768_dpi_to_ns(vm.hactive, vm.pixelclock),
+ tc358768_dpi_to_ns(vm.hfront_porch, vm.pixelclock),
+ tc358768_dpi_to_ns(dpi_htot, vm.pixelclock));
+
+ dev_dbg(dev, "dpi data start (ns): %u + %u = %u\n",
+ tc358768_dpi_to_ns(vm.hsync_len, vm.pixelclock),
+ tc358768_dpi_to_ns(vm.hback_porch, vm.pixelclock),
+ tc358768_dpi_to_ns(dpi_data_start, vm.pixelclock));
+
+ dsi_dpi_htot = tc358768_dpi_to_dsi_bytes(priv, dpi_htot);
+ dsi_dpi_data_start = tc358768_dpi_to_dsi_bytes(priv, dpi_data_start);
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
+ dsi_hsw = tc358768_dpi_to_dsi_bytes(priv, vm.hsync_len);
+ dsi_hbp = tc358768_dpi_to_dsi_bytes(priv, vm.hback_porch);
+ } else {
+ /* HBP is included in HSW in event mode */
+ dsi_hbp = 0;
+ dsi_hsw = tc358768_dpi_to_dsi_bytes(priv,
+ vm.hsync_len +
+ vm.hback_porch);
+
+ /*
+ * The pixel packet includes the actual pixel data, and:
+ * DSI packet header = 4 bytes
+ * DCS code = 1 byte
+ * DSI packet footer = 2 bytes
+ */
+ dsi_hact = hact + 4 + 1 + 2;
+
+ dsi_hfp = dsi_dpi_htot - dsi_hact - dsi_hsw - dsi_hss;
+
+ /*
+ * Here we should check if HFP is long enough for entering LP
+ * and exiting LP, but it's not clear how to calculate that.
+ * Instead, this is a naive algorithm that just adjusts the HFP
+ * and HSW so that HFP is (at least) roughly 2/3 of the total
+ * blanking time.
+ */
+ if (dsi_hfp < (dsi_hfp + dsi_hsw + dsi_hss) * 2 / 3) {
+ u32 old_hfp = dsi_hfp;
+ u32 old_hsw = dsi_hsw;
+ u32 tot = dsi_hfp + dsi_hsw + dsi_hss;
+
+ dsi_hsw = tot / 3;
+
+ /*
+ * Seems like sometimes HSW has to be divisible by num-lanes, but
+ * not always...
+ */
+ dsi_hsw = roundup(dsi_hsw, priv->dsi_lanes);
+
+ dsi_hfp = dsi_dpi_htot - dsi_hact - dsi_hsw - dsi_hss;
+
+ dev_dbg(dev,
+ "hfp too short, adjusting dsi hfp and dsi hsw from %u, %u to %u, %u\n",
+ old_hfp, old_hsw, dsi_hfp, dsi_hsw);
+ }
+
+ dev_dbg(dev,
+ "dsi horiz timing (bytes): %u, %u + %u + %u + %u = %u\n",
+ dsi_hss, dsi_hsw, dsi_hbp, dsi_hact, dsi_hfp,
+ dsi_hss + dsi_hsw + dsi_hbp + dsi_hact + dsi_hfp);
+
+ dev_dbg(dev, "dsi horiz timing (ns): %u + %u + %u + %u + %u = %u\n",
+ tc358768_dsi_bytes_to_ns(priv, dsi_hss),
+ tc358768_dsi_bytes_to_ns(priv, dsi_hsw),
+ tc358768_dsi_bytes_to_ns(priv, dsi_hbp),
+ tc358768_dsi_bytes_to_ns(priv, dsi_hact),
+ tc358768_dsi_bytes_to_ns(priv, dsi_hfp),
+ tc358768_dsi_bytes_to_ns(priv, dsi_hss + dsi_hsw +
+ dsi_hbp + dsi_hact + dsi_hfp));
+ }
+
+ /* VSDly calculation */
+
+ /* Start with the HW internal delay */
+ dsi_vsdly = internal_dly;
+
+ /* Convert to byte units as the other variables are in byte units */
+ dsi_vsdly *= priv->dsi_lanes;
+
+ /* Do we need more delay, in addition to the internal? */
+ if (dsi_dpi_data_start > dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp) {
+ dsi_vsdly = dsi_dpi_data_start - dsi_hss - dsi_hsw - dsi_hbp;
+ dsi_vsdly = roundup(dsi_vsdly, priv->dsi_lanes);
+ }
+
+ dev_dbg(dev, "dsi data start (bytes) %u + %u + %u + %u = %u\n",
+ dsi_vsdly, dsi_hss, dsi_hsw, dsi_hbp,
+ dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp);
+
+ dev_dbg(dev, "dsi data start (ns) %u + %u + %u + %u = %u\n",
+ tc358768_dsi_bytes_to_ns(priv, dsi_vsdly),
+ tc358768_dsi_bytes_to_ns(priv, dsi_hss),
+ tc358768_dsi_bytes_to_ns(priv, dsi_hsw),
+ tc358768_dsi_bytes_to_ns(priv, dsi_hbp),
+ tc358768_dsi_bytes_to_ns(priv, dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp));
+
+ /* Convert back to hsbyteclk */
+ dsi_vsdly /= priv->dsi_lanes;
+
+ /*
+ * The docs say that there is an internal delay of 40 cycles.
+ * However, we get underflows if we follow that rule. If we
+ * instead ignore the internal delay, things work. So either
+ * the docs are wrong or the calculations are wrong.
+ *
+ * As a temporary fix, add the internal delay here, to counter
+ * the subtraction when writing the register.
+ */
+ dsi_vsdly += internal_dly;
+
+ /* Clamp to the register max */
+ if (dsi_vsdly - internal_dly > 0x3ff) {
+ dev_warn(dev, "VSDly too high, underflows likely\n");
+ dsi_vsdly = 0x3ff + internal_dly;
+ }
+
/* VSDly[9:0] */
- video_start = max(video_start, internal_delay + 1) - internal_delay;
- tc358768_write(priv, TC358768_VSDLY, video_start);
+ tc358768_write(priv, TC358768_VSDLY, dsi_vsdly - internal_dly);
tc358768_write(priv, TC358768_DATAFMT, val);
tc358768_write(priv, TC358768_DSITX_DT, data_type);
@@ -722,67 +902,67 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
tc358768_write(priv, TC358768_D0W_CNTRL + i * 4, 0x0000);
/* DSI Timings */
- dsibclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION,
- dsibclk);
- dsiclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, dsiclk);
- ui_nsk = dsiclk_nsk / 2;
- dev_dbg(priv->dev, "dsiclk_nsk: %u\n", dsiclk_nsk);
- dev_dbg(priv->dev, "ui_nsk: %u\n", ui_nsk);
- dev_dbg(priv->dev, "dsibclk_nsk: %u\n", dsibclk_nsk);
+ hsbyteclk_ps = (u32)div_u64(PICO, hsbyteclk);
+ dsiclk_ps = (u32)div_u64(PICO, dsiclk);
+ ui_ps = dsiclk_ps / 2;
+ dev_dbg(dev, "dsiclk: %u ps, ui %u ps, hsbyteclk %u ps\n", dsiclk_ps,
+ ui_ps, hsbyteclk_ps);
/* LP11 > 100us for D-PHY Rx Init */
- val = tc358768_ns_to_cnt(100 * 1000, dsibclk_nsk) - 1;
- dev_dbg(priv->dev, "LINEINITCNT: 0x%x\n", val);
+ val = tc358768_ns_to_cnt(100 * 1000, hsbyteclk_ps) - 1;
+ dev_dbg(dev, "LINEINITCNT: %u\n", val);
tc358768_write(priv, TC358768_LINEINITCNT, val);
/* LPTimeCnt > 50ns */
- val = tc358768_ns_to_cnt(50, dsibclk_nsk) - 1;
+ val = tc358768_ns_to_cnt(50, hsbyteclk_ps) - 1;
lptxcnt = val;
- dev_dbg(priv->dev, "LPTXTIMECNT: 0x%x\n", val);
+ dev_dbg(dev, "LPTXTIMECNT: %u\n", val);
tc358768_write(priv, TC358768_LPTXTIMECNT, val);
/* 38ns < TCLK_PREPARE < 95ns */
- val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
+ val = tc358768_ns_to_cnt(65, hsbyteclk_ps) - 1;
+ dev_dbg(dev, "TCLK_PREPARECNT %u\n", val);
/* TCLK_PREPARE + TCLK_ZERO > 300ns */
- val2 = tc358768_ns_to_cnt(300 - tc358768_to_ns(2 * ui_nsk),
- dsibclk_nsk) - 2;
+ val2 = tc358768_ns_to_cnt(300 - tc358768_ps_to_ns(2 * ui_ps),
+ hsbyteclk_ps) - 2;
+ dev_dbg(dev, "TCLK_ZEROCNT %u\n", val2);
val |= val2 << 8;
- dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);
/* TCLK_TRAIL > 60ns AND TEOT <= 105 ns + 12*UI */
- raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(2 * ui_nsk), dsibclk_nsk) - 5;
+ raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(2 * ui_ps), hsbyteclk_ps) - 5;
val = clamp(raw_val, 0, 127);
- dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
+ dev_dbg(dev, "TCLK_TRAILCNT: %u\n", val);
tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);
/* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
- val = 50 + tc358768_to_ns(4 * ui_nsk);
- val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
+ val = 50 + tc358768_ps_to_ns(4 * ui_ps);
+ val = tc358768_ns_to_cnt(val, hsbyteclk_ps) - 1;
+ dev_dbg(dev, "THS_PREPARECNT %u\n", val);
/* THS_PREPARE + THS_ZERO > 145ns + 10*UI */
- raw_val = tc358768_ns_to_cnt(145 - tc358768_to_ns(3 * ui_nsk), dsibclk_nsk) - 10;
+ raw_val = tc358768_ns_to_cnt(145 - tc358768_ps_to_ns(3 * ui_ps), hsbyteclk_ps) - 10;
val2 = clamp(raw_val, 0, 127);
+ dev_dbg(dev, "THS_ZEROCNT %u\n", val2);
val |= val2 << 8;
- dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
tc358768_write(priv, TC358768_THS_HEADERCNT, val);
/* TWAKEUP > 1ms in lptxcnt steps */
- val = tc358768_ns_to_cnt(1020000, dsibclk_nsk);
+ val = tc358768_ns_to_cnt(1020000, hsbyteclk_ps);
val = val / (lptxcnt + 1) - 1;
- dev_dbg(priv->dev, "TWAKEUP: 0x%x\n", val);
+ dev_dbg(dev, "TWAKEUP: %u\n", val);
tc358768_write(priv, TC358768_TWAKEUP, val);
/* TCLK_POSTCNT > 60ns + 52*UI */
- val = tc358768_ns_to_cnt(60 + tc358768_to_ns(52 * ui_nsk),
- dsibclk_nsk) - 3;
- dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
+ val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(52 * ui_ps),
+ hsbyteclk_ps) - 3;
+ dev_dbg(dev, "TCLK_POSTCNT: %u\n", val);
tc358768_write(priv, TC358768_TCLK_POSTCNT, val);
/* max(60ns + 4*UI, 8*UI) < THS_TRAILCNT < 105ns + 12*UI */
- raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(18 * ui_nsk),
- dsibclk_nsk) - 4;
+ raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(18 * ui_ps),
+ hsbyteclk_ps) - 4;
val = clamp(raw_val, 0, 15);
- dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
+ dev_dbg(dev, "THS_TRAILCNT: %u\n", val);
tc358768_write(priv, TC358768_THS_TRAILCNT, val);
val = BIT(0);
@@ -790,16 +970,17 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
val |= BIT(i + 1);
tc358768_write(priv, TC358768_HSTXVREGEN, val);
- if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
- tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1);
+ tc358768_write(priv, TC358768_TXOPTIONCNTRL,
+ (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ? 0 : BIT(0));
/* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
- val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
- val = tc358768_ns_to_cnt(val, dsibclk_nsk) / 4 - 1;
- val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
- dsibclk_nsk) - 2;
+ val = tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps * 4);
+ val = tc358768_ns_to_cnt(val, hsbyteclk_ps) / 4 - 1;
+ dev_dbg(dev, "TXTAGOCNT: %u\n", val);
+ val2 = tc358768_ns_to_cnt(tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps),
+ hsbyteclk_ps) - 2;
+ dev_dbg(dev, "RXTASURECNT: %u\n", val2);
val = val << 16 | val2;
- dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val);
tc358768_write(priv, TC358768_BTACNTRL1, val);
/* START[0] */
@@ -810,58 +991,44 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
tc358768_write(priv, TC358768_DSI_EVENT, 0);
/* vact */
- tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
+ tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
/* vsw */
- tc358768_write(priv, TC358768_DSI_VSW,
- mode->vsync_end - mode->vsync_start);
+ tc358768_write(priv, TC358768_DSI_VSW, vm.vsync_len);
+
/* vbp */
- tc358768_write(priv, TC358768_DSI_VBPR,
- mode->vtotal - mode->vsync_end);
-
- /* hsw * byteclk * ndl / pclk */
- val = (u32)div_u64((mode->hsync_end - mode->hsync_start) *
- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
- mode->clock * 1000);
- tc358768_write(priv, TC358768_DSI_HSW, val);
-
- /* hbp * byteclk * ndl / pclk */
- val = (u32)div_u64((mode->htotal - mode->hsync_end) *
- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
- mode->clock * 1000);
- tc358768_write(priv, TC358768_DSI_HBPR, val);
+ tc358768_write(priv, TC358768_DSI_VBPR, vm.vback_porch);
} else {
/* Set event mode */
tc358768_write(priv, TC358768_DSI_EVENT, 1);
/* vact */
- tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
+ tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
/* vsw (+ vbp) */
tc358768_write(priv, TC358768_DSI_VSW,
- mode->vtotal - mode->vsync_start);
+ vm.vsync_len + vm.vback_porch);
+
/* vbp (not used in event mode) */
tc358768_write(priv, TC358768_DSI_VBPR, 0);
+ }
- /* (hsw + hbp) * byteclk * ndl / pclk */
- val = (u32)div_u64((mode->htotal - mode->hsync_start) *
- ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
- mode->clock * 1000);
- tc358768_write(priv, TC358768_DSI_HSW, val);
+ /* hsw (bytes) */
+ tc358768_write(priv, TC358768_DSI_HSW, dsi_hsw);
- /* hbp (not used in event mode) */
- tc358768_write(priv, TC358768_DSI_HBPR, 0);
- }
+ /* hbp (bytes) */
+ tc358768_write(priv, TC358768_DSI_HBPR, dsi_hbp);
/* hact (bytes) */
tc358768_write(priv, TC358768_DSI_HACT, hact);
/* VSYNC polarity */
- if (!(mode->flags & DRM_MODE_FLAG_NVSYNC))
- tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), BIT(5));
+ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5),
+ (mode->flags & DRM_MODE_FLAG_PVSYNC) ? BIT(5) : 0);
+
/* HSYNC polarity */
- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), BIT(0));
+ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0),
+ (mode->flags & DRM_MODE_FLAG_PHSYNC) ? BIT(0) : 0);
/* Start DSI Tx */
tc358768_write(priv, TC358768_DSI_START, 0x1);
@@ -891,7 +1058,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
ret = tc358768_clear_error(priv);
if (ret) {
- dev_err(priv->dev, "Bridge pre_enable failed: %d\n", ret);
+ dev_err(dev, "Bridge pre_enable failed: %d\n", ret);
tc358768_bridge_disable(bridge);
tc358768_bridge_post_disable(bridge);
}
@@ -959,9 +1126,27 @@ tc358768_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
return input_fmts;
}
+static bool tc358768_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* Default to positive sync */
+
+ if (!(adjusted_mode->flags &
+ (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
+ adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+
+ if (!(adjusted_mode->flags &
+ (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
+ adjusted_mode->flags |= DRM_MODE_FLAG_PVSYNC;
+
+ return true;
+}
+
static const struct drm_bridge_funcs tc358768_bridge_funcs = {
.attach = tc358768_bridge_attach,
.mode_valid = tc358768_bridge_mode_valid,
+ .mode_fixup = tc358768_mode_fixup,
.pre_enable = tc358768_bridge_pre_enable,
.enable = tc358768_bridge_enable,
.disable = tc358768_bridge_disable,
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index e04f87ff755a..c490e8befc2f 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -3308,8 +3308,7 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_atomic_payload *payload)
{
struct drm_dp_mst_port *port;
- int ret = 0;
- bool allocate = true;
+ int ret;
/* Update mst mgr info */
if (mgr->payload_count == 0)
@@ -3320,27 +3319,27 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
mgr->payload_count++;
mgr->next_start_slot += payload->time_slots;
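+ /* From here on the payload is at least locally allocated; it is promoted to DFP status further down once created at the downstream port */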
+ payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL;
+
/* Allocate payload to immediate downstream facing port */
port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
if (!port) {
drm_dbg_kms(mgr->dev,
"VCPI %d for port %p not in topology, not creating a payload to remote\n",
payload->vcpi, payload->port);
- allocate = false;
+ return -EIO;
}
- if (allocate) {
- ret = drm_dp_create_payload_at_dfp(mgr, payload);
- if (ret < 0)
- drm_warn(mgr->dev, "Failed to create MST payload for port %p: %d\n",
- payload->port, ret);
-
+ ret = drm_dp_create_payload_at_dfp(mgr, payload);
+ if (ret < 0) {
+ drm_dbg_kms(mgr->dev, "Failed to create MST payload for port %p: %d\n",
+ payload->port, ret);
+ goto put_port;
}
- payload->payload_allocation_status =
- (!allocate || ret < 0) ? DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL :
- DRM_DP_MST_PAYLOAD_ALLOCATION_DFP;
+ payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_DFP;
+put_port:
drm_dp_mst_topology_put_port(port);
return ret;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 292e38eb6218..71d399397107 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3339,6 +3339,9 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
struct drm_modeset_acquire_ctx ctx;
int ret;
+ if (dev == NULL)
+ return;
+
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
ret = drm_atomic_helper_disable_all(dev, &ctx);
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index cf92a9ae8034..2ed2585ded37 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -235,7 +235,8 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
static int
drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv)
{
- if (file_priv->pid == task_pid(current) && file_priv->was_master)
+ if (file_priv->was_master &&
+ rcu_access_pointer(file_priv->pid) == task_pid(current))
return 0;
if (!capable(CAP_SYS_ADMIN))
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index e6f5ba5f4baf..f57e6d74fb0e 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -480,10 +480,12 @@ err_undo:
static int __alloc_range(struct drm_buddy *mm,
struct list_head *dfs,
u64 start, u64 size,
- struct list_head *blocks)
+ struct list_head *blocks,
+ u64 *total_allocated_on_err)
{
struct drm_buddy_block *block;
struct drm_buddy_block *buddy;
+ u64 total_allocated = 0;
LIST_HEAD(allocated);
u64 end;
int err;
@@ -520,6 +522,7 @@ static int __alloc_range(struct drm_buddy *mm,
}
mark_allocated(block);
+ total_allocated += drm_buddy_block_size(mm, block);
mm->avail -= drm_buddy_block_size(mm, block);
list_add_tail(&block->link, &allocated);
continue;
@@ -551,13 +554,20 @@ err_undo:
__drm_buddy_free(mm, block);
err_free:
- drm_buddy_free_list(mm, &allocated);
+ if (err == -ENOSPC && total_allocated_on_err) {
+ list_splice_tail(&allocated, blocks);
+ *total_allocated_on_err = total_allocated;
+ } else {
+ drm_buddy_free_list(mm, &allocated);
+ }
+
return err;
}
static int __drm_buddy_alloc_range(struct drm_buddy *mm,
u64 start,
u64 size,
+ u64 *total_allocated_on_err,
struct list_head *blocks)
{
LIST_HEAD(dfs);
@@ -566,7 +576,62 @@ static int __drm_buddy_alloc_range(struct drm_buddy *mm,
for (i = 0; i < mm->n_roots; ++i)
list_add_tail(&mm->roots[i]->tmp_link, &dfs);
- return __alloc_range(mm, &dfs, start, size, blocks);
+ return __alloc_range(mm, &dfs, start, size,
+ blocks, total_allocated_on_err);
+}
+
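+/*
+ * Contiguous allocation fallback: pick a free block of the largest
+ * power-of-two order not exceeding the requested size, allocate from the
+ * block's offset towards the right, then cover the remainder from the range
+ * immediately to its left. If either side cannot be satisfied, free what was
+ * taken and try the next candidate block.
+ */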
+static int __alloc_contig_try_harder(struct drm_buddy *mm,
+ u64 size,
+ u64 min_block_size,
+ struct list_head *blocks)
+{
+ u64 rhs_offset, lhs_offset, lhs_size, filled;
+ struct drm_buddy_block *block;
+ struct list_head *list;
+ LIST_HEAD(blocks_lhs);
+ unsigned long pages;
+ unsigned int order;
+ u64 modify_size;
+ int err;
+
+ modify_size = rounddown_pow_of_two(size);
+ pages = modify_size >> ilog2(mm->chunk_size);
+ order = fls(pages) - 1;
+ if (order == 0)
+ return -ENOSPC;
+
+ list = &mm->free_list[order];
+ if (list_empty(list))
+ return -ENOSPC;
+
+ list_for_each_entry_reverse(block, list, link) {
+ /* Allocate blocks traversing RHS */
+ rhs_offset = drm_buddy_block_offset(block);
+ err = __drm_buddy_alloc_range(mm, rhs_offset, size,
+ &filled, blocks);
+ if (!err || err != -ENOSPC)
+ return err;
+
+ lhs_size = max((size - filled), min_block_size);
+ if (!IS_ALIGNED(lhs_size, min_block_size))
+ lhs_size = round_up(lhs_size, min_block_size);
+
+ /* Allocate blocks traversing LHS */
+ lhs_offset = drm_buddy_block_offset(block) - lhs_size;
+ err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
+ NULL, &blocks_lhs);
+ if (!err) {
+ list_splice(&blocks_lhs, blocks);
+ return 0;
+ } else if (err != -ENOSPC) {
+ drm_buddy_free_list(mm, blocks);
+ return err;
+ }
+ /* Free blocks for the next iteration */
+ drm_buddy_free_list(mm, blocks);
+ }
+
+ return -ENOSPC;
}
/**
@@ -626,7 +691,7 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
new_start = drm_buddy_block_offset(block);
list_add(&block->tmp_link, &dfs);
- err = __alloc_range(mm, &dfs, new_start, new_size, blocks);
+ err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
if (err) {
mark_allocated(block);
mm->avail -= drm_buddy_block_size(mm, block);
@@ -645,7 +710,7 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
* @start: start of the allowed range for this block
* @end: end of the allowed range for this block
* @size: size of the allocation
- * @min_page_size: alignment of the allocation
+ * @min_block_size: alignment of the allocation
* @blocks: output list head to add allocated blocks
* @flags: DRM_BUDDY_*_ALLOCATION flags
*
@@ -660,23 +725,24 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
*/
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
u64 start, u64 end, u64 size,
- u64 min_page_size,
+ u64 min_block_size,
struct list_head *blocks,
unsigned long flags)
{
struct drm_buddy_block *block = NULL;
+ u64 original_size, original_min_size;
unsigned int min_order, order;
- unsigned long pages;
LIST_HEAD(allocated);
+ unsigned long pages;
int err;
if (size < mm->chunk_size)
return -EINVAL;
- if (min_page_size < mm->chunk_size)
+ if (min_block_size < mm->chunk_size)
return -EINVAL;
- if (!is_power_of_2(min_page_size))
+ if (!is_power_of_2(min_block_size))
return -EINVAL;
if (!IS_ALIGNED(start | end | size, mm->chunk_size))
@@ -690,14 +756,23 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
/* Actual range allocation */
if (start + size == end)
- return __drm_buddy_alloc_range(mm, start, size, blocks);
-
- if (!IS_ALIGNED(size, min_page_size))
- return -EINVAL;
+ return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
+
+ original_size = size;
+ original_min_size = min_block_size;
+
+ /* Roundup the size to power of 2 */
+ if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
+ size = roundup_pow_of_two(size);
+ min_block_size = size;
+ /* Align size value to min_block_size */
+ } else if (!IS_ALIGNED(size, min_block_size)) {
+ size = round_up(size, min_block_size);
+ }
pages = size >> ilog2(mm->chunk_size);
order = fls(pages) - 1;
- min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
+ min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);
do {
order = min(order, (unsigned int)fls(pages) - 1);
@@ -716,6 +791,16 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
break;
if (order-- == min_order) {
+ if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
+ !(flags & DRM_BUDDY_RANGE_ALLOCATION))
+ /*
+ * Try contiguous block allocation through
+ * the try-harder method
+ */
+ return __alloc_contig_try_harder(mm,
+ original_size,
+ original_min_size,
+ blocks);
err = -ENOSPC;
goto err_free;
}
@@ -732,6 +817,31 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
break;
} while (1);
+ /* Trim the allocated block to the required size */
+ if (original_size != size) {
+ struct list_head *trim_list;
+ LIST_HEAD(temp);
+ u64 trim_size;
+
+ trim_list = &allocated;
+ trim_size = original_size;
+
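+ /* Only the final block can be oversized, so detach and trim just that one when more than one block was allocated */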
+ if (!list_is_singular(&allocated)) {
+ block = list_last_entry(&allocated, typeof(*block), link);
+ list_move(&block->link, &temp);
+ trim_list = &temp;
+ trim_size = drm_buddy_block_size(mm, block) -
+ (size - original_size);
+ }
+
+ drm_buddy_block_trim(mm,
+ trim_size,
+ trim_list);
+
+ if (!list_empty(&temp))
+ list_splice_tail(trim_list, &allocated);
+ }
+
list_splice_tail(&allocated, blocks);
return 0;
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 34c7d1a580e3..f291fb4b359f 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -40,7 +40,7 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
-#include <drm/drm_gpuva_mgr.h>
+#include <drm/drm_gpuvm.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@@ -92,15 +92,17 @@ static int drm_clients_info(struct seq_file *m, void *data)
*/
mutex_lock(&dev->filelist_mutex);
list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
- struct task_struct *task;
bool is_current_master = drm_is_current_master(priv);
+ struct task_struct *task;
+ struct pid *pid;
- rcu_read_lock(); /* locks pid_task()->comm */
- task = pid_task(priv->pid, PIDTYPE_TGID);
+ rcu_read_lock(); /* Locks priv->pid and pid_task()->comm! */
+ pid = rcu_dereference(priv->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
task ? task->comm : "<unknown>",
- pid_vnr(priv->pid),
+ pid_vnr(pid),
priv->minor->index,
is_current_master ? 'y' : 'n',
priv->authenticated ? 'y' : 'n',
@@ -187,31 +189,31 @@ static const struct file_operations drm_debugfs_fops = {
/**
* drm_debugfs_gpuva_info - dump the given DRM GPU VA space
* @m: pointer to the &seq_file to write
- * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
*
* Dumps the GPU VA mappings of a given DRM GPU VA manager.
*
* For each DRM GPU VA space drivers should call this function from their
* &drm_info_list's show callback.
*
- * Returns: 0 on success, -ENODEV if the &mgr is not initialized
+ * Returns: 0 on success, -ENODEV if the &gpuvm is not initialized
*/
int drm_debugfs_gpuva_info(struct seq_file *m,
- struct drm_gpuva_manager *mgr)
+ struct drm_gpuvm *gpuvm)
{
- struct drm_gpuva *va, *kva = &mgr->kernel_alloc_node;
+ struct drm_gpuva *va, *kva = &gpuvm->kernel_alloc_node;
- if (!mgr->name)
+ if (!gpuvm->name)
return -ENODEV;
seq_printf(m, "DRM GPU VA space (%s) [0x%016llx;0x%016llx]\n",
- mgr->name, mgr->mm_start, mgr->mm_start + mgr->mm_range);
+ gpuvm->name, gpuvm->mm_start, gpuvm->mm_start + gpuvm->mm_range);
seq_printf(m, "Kernel reserved node [0x%016llx;0x%016llx]\n",
kva->va.addr, kva->va.addr + kva->va.range);
seq_puts(m, "\n");
seq_puts(m, " VAs | start | range | end | object | object offset\n");
seq_puts(m, "-------------------------------------------------------------------------------------------------------------\n");
- drm_gpuva_for_each_va(va, mgr) {
+ drm_gpuvm_for_each_va(va, gpuvm) {
if (unlikely(va == kva))
continue;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 340da8257b51..6e587f58c7aa 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3496,11 +3496,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
mode->vsync_end = mode->vsync_start + vsync_pulse_width;
mode->vtotal = mode->vdisplay + vblank;
- /* Some EDIDs have bogus h/vtotal values */
- if (mode->hsync_end > mode->htotal)
- mode->htotal = mode->hsync_end + 1;
- if (mode->vsync_end > mode->vtotal)
- mode->vtotal = mode->vsync_end + 1;
+ /* Some EDIDs have bogus h/vsync_end values */
+ if (mode->hsync_end > mode->htotal) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] reducing hsync_end %d->%d\n",
+ connector->base.id, connector->name,
+ mode->hsync_end, mode->htotal);
+ mode->hsync_end = mode->htotal;
+ }
+ if (mode->vsync_end > mode->vtotal) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] reducing vsync_end %d->%d\n",
+ connector->base.id, connector->name,
+ mode->vsync_end, mode->vtotal);
+ mode->vsync_end = mode->vtotal;
+ }
drm_mode_do_interlace_quirk(mode, pt);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 883d83bc0e3d..e692770ef6d3 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -160,7 +160,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
/* Get a unique identifier for fdinfo: */
file->client_id = atomic64_inc_return(&ident);
- file->pid = get_pid(task_tgid(current));
+ rcu_assign_pointer(file->pid, get_pid(task_tgid(current)));
file->minor = minor;
/* for compatibility root is always authenticated */
@@ -200,7 +200,7 @@ out_prime_destroy:
drm_syncobj_release(file);
if (drm_core_check_feature(dev, DRIVER_GEM))
drm_gem_release(dev, file);
- put_pid(file->pid);
+ put_pid(rcu_access_pointer(file->pid));
kfree(file);
return ERR_PTR(ret);
@@ -291,7 +291,7 @@ void drm_file_free(struct drm_file *file)
WARN_ON(!list_empty(&file->event_list));
- put_pid(file->pid);
+ put_pid(rcu_access_pointer(file->pid));
kfree(file);
}
@@ -505,6 +505,40 @@ int drm_release(struct inode *inode, struct file *filp)
}
EXPORT_SYMBOL(drm_release);
+void drm_file_update_pid(struct drm_file *filp)
+{
+ struct drm_device *dev;
+ struct pid *pid, *old;
+
+ /*
+ * Master nodes need to keep the original ownership in order for
+ * drm_master_check_perm to keep working correctly. (See comment in
+ * drm_auth.c.)
+ */
+ if (filp->was_master)
+ return;
+
+ pid = task_tgid(current);
+
+ /*
+ * Quick unlocked check since the model is a single handover followed by
+ * exclusive repeated use.
+ */
+ if (pid == rcu_access_pointer(filp->pid))
+ return;
+
+ dev = filp->minor->dev;
+ mutex_lock(&dev->filelist_mutex);
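+ /* The '1' below is the lockdep condition for rcu_replace_pointer(): writers are serialized by filelist_mutex */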
+ old = rcu_replace_pointer(filp->pid, pid, 1);
+ mutex_unlock(&dev->filelist_mutex);
+
+ if (pid != old) {
+ get_pid(pid);
+ synchronize_rcu();
+ put_pid(old);
+ }
+}
+
/**
* drm_release_noglobal - release method for DRM file
* @inode: device inode
diff --git a/drivers/gpu/drm/drm_gpuva_mgr.c b/drivers/gpu/drm/drm_gpuvm.c
index ad99c9cfedac..02ce6baacdad 100644
--- a/drivers/gpu/drm/drm_gpuva_mgr.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -25,7 +25,7 @@
*
*/
-#include <drm/drm_gpuva_mgr.h>
+#include <drm/drm_gpuvm.h>
#include <linux/interval_tree_generic.h>
#include <linux/mm.h>
@@ -33,8 +33,8 @@
/**
* DOC: Overview
*
- * The DRM GPU VA Manager, represented by struct drm_gpuva_manager keeps track
- * of a GPU's virtual address (VA) space and manages the corresponding virtual
+ * The DRM GPU VA Manager, represented by struct drm_gpuvm keeps track of a
+ * GPU's virtual address (VA) space and manages the corresponding virtual
* mappings represented by &drm_gpuva objects. It also keeps track of the
* mapping's backing &drm_gem_object buffers.
*
@@ -47,28 +47,28 @@
* The GPU VA manager internally uses a rb-tree to manage the
* &drm_gpuva mappings within a GPU's virtual address space.
*
- * The &drm_gpuva_manager contains a special &drm_gpuva representing the
+ * The &drm_gpuvm structure contains a special &drm_gpuva representing the
* portion of VA space reserved by the kernel. This node is initialized together
* with the GPU VA manager instance and removed when the GPU VA manager is
* destroyed.
*
- * In a typical application drivers would embed struct drm_gpuva_manager and
+ * In a typical application drivers would embed struct drm_gpuvm and
* struct drm_gpuva within their own driver-specific structures; there won't be
* any memory allocations of its own nor memory allocations of &drm_gpuva
* entries.
*
- * The data structures needed to store &drm_gpuvas within the &drm_gpuva_manager
- * are contained within struct drm_gpuva already. Hence, for inserting
- * &drm_gpuva entries from within dma-fence signalling critical sections it is
- * enough to pre-allocate the &drm_gpuva structures.
+ * The data structures needed to store &drm_gpuvas within the &drm_gpuvm are
+ * contained within struct drm_gpuva already. Hence, for inserting &drm_gpuva
+ * entries from within dma-fence signalling critical sections it is enough to
+ * pre-allocate the &drm_gpuva structures.
*/
/**
* DOC: Split and Merge
*
* Besides its capability to manage and represent a GPU VA space, the
- * &drm_gpuva_manager also provides functions to let the &drm_gpuva_manager
- * calculate a sequence of operations to satisfy a given map or unmap request.
+ * GPU VA manager also provides functions to let the &drm_gpuvm calculate a
+ * sequence of operations to satisfy a given map or unmap request.
*
* Therefore the DRM GPU VA manager provides an algorithm implementing splitting
* and merging of existent GPU VA mappings with the ones that are requested to
@@ -76,16 +76,16 @@
* implement Vulkan 'Sparse Memory Bindings' - driver UAPIs often refer to this
* as VM BIND.
*
- * Drivers can call drm_gpuva_sm_map() to receive a sequence of callbacks
+ * Drivers can call drm_gpuvm_sm_map() to receive a sequence of callbacks
* containing map, unmap and remap operations for a given newly requested
* mapping. The sequence of callbacks represents the set of operations to
* execute in order to integrate the new mapping cleanly into the current state
* of the GPU VA space.
*
* Depending on how the new GPU VA mapping intersects with the existent mappings
- * of the GPU VA space the &drm_gpuva_fn_ops callbacks contain an arbitrary
- * amount of unmap operations, a maximum of two remap operations and a single
- * map operation. The caller might receive no callback at all if no operation is
+ * of the GPU VA space the &drm_gpuvm_ops callbacks contain an arbitrary amount
+ * of unmap operations, a maximum of two remap operations and a single map
+ * operation. The caller might receive no callback at all if no operation is
* required, e.g. if the requested mapping already exists in the exact same way.
*
* The single map operation represents the original map operation requested by
@@ -95,7 +95,7 @@
* &drm_gpuva to unmap is physically contiguous with the original mapping
* request. Optionally, if 'keep' is set, drivers may keep the actual page table
* entries for this &drm_gpuva, adding the missing page table entries only and
- * update the &drm_gpuva_manager's view of things accordingly.
+ * update the &drm_gpuvm's view of things accordingly.
*
* Drivers may do the same optimization, namely delta page table updates, also
* for remap operations. This is possible since &drm_gpuva_op_remap consists of
@@ -106,34 +106,34 @@
* the beginning and one at the end of the new mapping, hence there is a
* maximum of two remap operations.
*
- * Analogous to drm_gpuva_sm_map() drm_gpuva_sm_unmap() uses &drm_gpuva_fn_ops
- * to call back into the driver in order to unmap a range of GPU VA space. The
+ * Analogous to drm_gpuvm_sm_map() drm_gpuvm_sm_unmap() uses &drm_gpuvm_ops to
+ * call back into the driver in order to unmap a range of GPU VA space. The
* logic behind this function is way simpler though: For all existent mappings
* enclosed by the given range, unmap operations are created. For mappings which
* are only partially located within the given range, remap operations are
* created such that those mappings are split up and re-mapped partially.
*
- * As an alternative to drm_gpuva_sm_map() and drm_gpuva_sm_unmap(),
- * drm_gpuva_sm_map_ops_create() and drm_gpuva_sm_unmap_ops_create() can be used
+ * As an alternative to drm_gpuvm_sm_map() and drm_gpuvm_sm_unmap(),
+ * drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create() can be used
* to directly obtain an instance of struct drm_gpuva_ops containing a list of
* &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list
* contains the &drm_gpuva_ops analogous to the callbacks one would receive when
- * calling drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). While this way requires
+ * calling drm_gpuvm_sm_map() or drm_gpuvm_sm_unmap(). While this way requires
* more memory (to allocate the &drm_gpuva_ops), it provides drivers a way to
* iterate the &drm_gpuva_op multiple times, e.g. once in a context where memory
* allocations are possible (e.g. to allocate GPU page tables) and once in the
* dma-fence signalling critical path.
*
- * To update the &drm_gpuva_manager's view of the GPU VA space
- * drm_gpuva_insert() and drm_gpuva_remove() may be used. These functions can
- * safely be used from &drm_gpuva_fn_ops callbacks originating from
- * drm_gpuva_sm_map() or drm_gpuva_sm_unmap(). However, it might be more
- * convenient to use the provided helper functions drm_gpuva_map(),
- * drm_gpuva_remap() and drm_gpuva_unmap() instead.
+ * To update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert() and
+ * drm_gpuva_remove() may be used. These functions can safely be used from
+ * &drm_gpuvm_ops callbacks originating from drm_gpuvm_sm_map() or
+ * drm_gpuvm_sm_unmap(). However, it might be more convenient to use the
+ * provided helper functions drm_gpuva_map(), drm_gpuva_remap() and
+ * drm_gpuva_unmap() instead.
*
* The following diagram depicts the basic relationships of existent GPU VA
* mappings, a newly requested mapping and the resulting mappings as implemented
- * by drm_gpuva_sm_map() - it doesn't cover any arbitrary combinations of these.
+ * by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these.
*
* 1) Requested mapping is identical. Replace it, but indicate the backing PTEs
* could be kept.
@@ -421,10 +421,10 @@
* // Allocates a new &drm_gpuva.
* struct drm_gpuva * driver_gpuva_alloc(void);
*
- * // Typically drivers would embedd the &drm_gpuva_manager and &drm_gpuva
+ * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
* // structure in individual driver structures and lock the dma-resv with
* // drm_exec or similar helpers.
- * int driver_mapping_create(struct drm_gpuva_manager *mgr,
+ * int driver_mapping_create(struct drm_gpuvm *gpuvm,
* u64 addr, u64 range,
* struct drm_gem_object *obj, u64 offset)
* {
@@ -432,7 +432,7 @@
* struct drm_gpuva_op *op
*
* driver_lock_va_space();
- * ops = drm_gpuva_sm_map_ops_create(mgr, addr, range,
+ * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range,
* obj, offset);
* if (IS_ERR(ops))
* return PTR_ERR(ops);
@@ -448,7 +448,7 @@
* // free memory and unlock
*
* driver_vm_map();
- * drm_gpuva_map(mgr, va, &op->map);
+ * drm_gpuva_map(gpuvm, va, &op->map);
* drm_gpuva_link(va);
*
* break;
@@ -504,23 +504,23 @@
* 2) Receive a callback for each &drm_gpuva_op to create a new mapping::
*
* struct driver_context {
- * struct drm_gpuva_manager *mgr;
+ * struct drm_gpuvm *gpuvm;
* struct drm_gpuva *new_va;
* struct drm_gpuva *prev_va;
* struct drm_gpuva *next_va;
* };
*
- * // ops to pass to drm_gpuva_manager_init()
- * static const struct drm_gpuva_fn_ops driver_gpuva_ops = {
+ * // ops to pass to drm_gpuvm_init()
+ * static const struct drm_gpuvm_ops driver_gpuvm_ops = {
* .sm_step_map = driver_gpuva_map,
* .sm_step_remap = driver_gpuva_remap,
* .sm_step_unmap = driver_gpuva_unmap,
* };
*
- * // Typically drivers would embedd the &drm_gpuva_manager and &drm_gpuva
+ * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva
* // structure in individual driver structures and lock the dma-resv with
* // drm_exec or similar helpers.
- * int driver_mapping_create(struct drm_gpuva_manager *mgr,
+ * int driver_mapping_create(struct drm_gpuvm *gpuvm,
* u64 addr, u64 range,
* struct drm_gem_object *obj, u64 offset)
* {
@@ -529,7 +529,7 @@
* struct drm_gpuva_op *op;
* int ret = 0;
*
- * ctx.mgr = mgr;
+ * ctx.gpuvm = gpuvm;
*
* ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL);
* ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL);
@@ -540,7 +540,7 @@
* }
*
* driver_lock_va_space();
- * ret = drm_gpuva_sm_map(mgr, &ctx, addr, range, obj, offset);
+ * ret = drm_gpuvm_sm_map(gpuvm, &ctx, addr, range, obj, offset);
* driver_unlock_va_space();
*
* out:
@@ -554,7 +554,7 @@
* {
* struct driver_context *ctx = __ctx;
*
- * drm_gpuva_map(ctx->mgr, ctx->new_va, &op->map);
+ * drm_gpuva_map(ctx->gpuvm, ctx->new_va, &op->map);
*
* drm_gpuva_link(ctx->new_va);
*
@@ -609,12 +609,12 @@ INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last,
GPUVA_START, GPUVA_LAST, static __maybe_unused,
drm_gpuva_it)
-static int __drm_gpuva_insert(struct drm_gpuva_manager *mgr,
+static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm,
struct drm_gpuva *va);
static void __drm_gpuva_remove(struct drm_gpuva *va);
static bool
-drm_gpuva_check_overflow(u64 addr, u64 range)
+drm_gpuvm_check_overflow(u64 addr, u64 range)
{
u64 end;
@@ -623,121 +623,121 @@ drm_gpuva_check_overflow(u64 addr, u64 range)
}
static bool
-drm_gpuva_in_mm_range(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
+drm_gpuvm_in_mm_range(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
{
u64 end = addr + range;
- u64 mm_start = mgr->mm_start;
- u64 mm_end = mm_start + mgr->mm_range;
+ u64 mm_start = gpuvm->mm_start;
+ u64 mm_end = mm_start + gpuvm->mm_range;
return addr >= mm_start && end <= mm_end;
}
static bool
-drm_gpuva_in_kernel_node(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
+drm_gpuvm_in_kernel_node(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
{
u64 end = addr + range;
- u64 kstart = mgr->kernel_alloc_node.va.addr;
- u64 krange = mgr->kernel_alloc_node.va.range;
+ u64 kstart = gpuvm->kernel_alloc_node.va.addr;
+ u64 krange = gpuvm->kernel_alloc_node.va.range;
u64 kend = kstart + krange;
return krange && addr < kend && kstart < end;
}
static bool
-drm_gpuva_range_valid(struct drm_gpuva_manager *mgr,
+drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm,
u64 addr, u64 range)
{
- return !drm_gpuva_check_overflow(addr, range) &&
- drm_gpuva_in_mm_range(mgr, addr, range) &&
- !drm_gpuva_in_kernel_node(mgr, addr, range);
+ return !drm_gpuvm_check_overflow(addr, range) &&
+ drm_gpuvm_in_mm_range(gpuvm, addr, range) &&
+ !drm_gpuvm_in_kernel_node(gpuvm, addr, range);
}
/**
- * drm_gpuva_manager_init() - initialize a &drm_gpuva_manager
- * @mgr: pointer to the &drm_gpuva_manager to initialize
+ * drm_gpuvm_init() - initialize a &drm_gpuvm
+ * @gpuvm: pointer to the &drm_gpuvm to initialize
* @name: the name of the GPU VA space
* @start_offset: the start offset of the GPU VA space
* @range: the size of the GPU VA space
* @reserve_offset: the start of the kernel reserved GPU VA area
* @reserve_range: the size of the kernel reserved GPU VA area
- * @ops: &drm_gpuva_fn_ops called on &drm_gpuva_sm_map / &drm_gpuva_sm_unmap
+ * @ops: &drm_gpuvm_ops called on &drm_gpuvm_sm_map / &drm_gpuvm_sm_unmap
*
- * The &drm_gpuva_manager must be initialized with this function before use.
+ * The &drm_gpuvm must be initialized with this function before use.
*
- * Note that @mgr must be cleared to 0 before calling this function. The given
+ * Note that @gpuvm must be cleared to 0 before calling this function. The given
* &name is expected to be managed by the surrounding driver structures.
*/
void
-drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
- const char *name,
- u64 start_offset, u64 range,
- u64 reserve_offset, u64 reserve_range,
- const struct drm_gpuva_fn_ops *ops)
+drm_gpuvm_init(struct drm_gpuvm *gpuvm,
+ const char *name,
+ u64 start_offset, u64 range,
+ u64 reserve_offset, u64 reserve_range,
+ const struct drm_gpuvm_ops *ops)
{
- mgr->rb.tree = RB_ROOT_CACHED;
- INIT_LIST_HEAD(&mgr->rb.list);
+ gpuvm->rb.tree = RB_ROOT_CACHED;
+ INIT_LIST_HEAD(&gpuvm->rb.list);
- drm_gpuva_check_overflow(start_offset, range);
- mgr->mm_start = start_offset;
- mgr->mm_range = range;
+ drm_gpuvm_check_overflow(start_offset, range);
+ gpuvm->mm_start = start_offset;
+ gpuvm->mm_range = range;
- mgr->name = name ? name : "unknown";
- mgr->ops = ops;
+ gpuvm->name = name ? name : "unknown";
+ gpuvm->ops = ops;
- memset(&mgr->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
+ memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva));
if (reserve_range) {
- mgr->kernel_alloc_node.va.addr = reserve_offset;
- mgr->kernel_alloc_node.va.range = reserve_range;
+ gpuvm->kernel_alloc_node.va.addr = reserve_offset;
+ gpuvm->kernel_alloc_node.va.range = reserve_range;
- if (likely(!drm_gpuva_check_overflow(reserve_offset,
+ if (likely(!drm_gpuvm_check_overflow(reserve_offset,
reserve_range)))
- __drm_gpuva_insert(mgr, &mgr->kernel_alloc_node);
+ __drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);
}
}
-EXPORT_SYMBOL_GPL(drm_gpuva_manager_init);
+EXPORT_SYMBOL_GPL(drm_gpuvm_init);
/**
- * drm_gpuva_manager_destroy() - cleanup a &drm_gpuva_manager
- * @mgr: pointer to the &drm_gpuva_manager to clean up
+ * drm_gpuvm_destroy() - cleanup a &drm_gpuvm
+ * @gpuvm: pointer to the &drm_gpuvm to clean up
*
* Note that it is a bug to call this function on a manager that still
* holds GPU VA mappings.
*/
void
-drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr)
+drm_gpuvm_destroy(struct drm_gpuvm *gpuvm)
{
- mgr->name = NULL;
+ gpuvm->name = NULL;
- if (mgr->kernel_alloc_node.va.range)
- __drm_gpuva_remove(&mgr->kernel_alloc_node);
+ if (gpuvm->kernel_alloc_node.va.range)
+ __drm_gpuva_remove(&gpuvm->kernel_alloc_node);
- WARN(!RB_EMPTY_ROOT(&mgr->rb.tree.rb_root),
+ WARN(!RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
"GPUVA tree is not empty, potentially leaking memory.");
}
-EXPORT_SYMBOL_GPL(drm_gpuva_manager_destroy);
+EXPORT_SYMBOL_GPL(drm_gpuvm_destroy);
static int
-__drm_gpuva_insert(struct drm_gpuva_manager *mgr,
+__drm_gpuva_insert(struct drm_gpuvm *gpuvm,
struct drm_gpuva *va)
{
struct rb_node *node;
struct list_head *head;
- if (drm_gpuva_it_iter_first(&mgr->rb.tree,
+ if (drm_gpuva_it_iter_first(&gpuvm->rb.tree,
GPUVA_START(va),
GPUVA_LAST(va)))
return -EEXIST;
- va->mgr = mgr;
+ va->vm = gpuvm;
- drm_gpuva_it_insert(va, &mgr->rb.tree);
+ drm_gpuva_it_insert(va, &gpuvm->rb.tree);
node = rb_prev(&va->rb.node);
if (node)
head = &(to_drm_gpuva(node))->rb.entry;
else
- head = &mgr->rb.list;
+ head = &gpuvm->rb.list;
list_add(&va->rb.entry, head);
@@ -746,36 +746,36 @@ __drm_gpuva_insert(struct drm_gpuva_manager *mgr,
/**
* drm_gpuva_insert() - insert a &drm_gpuva
- * @mgr: the &drm_gpuva_manager to insert the &drm_gpuva in
+ * @gpuvm: the &drm_gpuvm to insert the &drm_gpuva in
* @va: the &drm_gpuva to insert
*
* Insert a &drm_gpuva with a given address and range into a
- * &drm_gpuva_manager.
+ * &drm_gpuvm.
*
* It is safe to use this function using the safe versions of iterating the GPU
- * VA space, such as drm_gpuva_for_each_va_safe() and
- * drm_gpuva_for_each_va_range_safe().
+ * VA space, such as drm_gpuvm_for_each_va_safe() and
+ * drm_gpuvm_for_each_va_range_safe().
*
* Returns: 0 on success, negative error code on failure.
*/
int
-drm_gpuva_insert(struct drm_gpuva_manager *mgr,
+drm_gpuva_insert(struct drm_gpuvm *gpuvm,
struct drm_gpuva *va)
{
u64 addr = va->va.addr;
u64 range = va->va.range;
- if (unlikely(!drm_gpuva_range_valid(mgr, addr, range)))
+ if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range)))
return -EINVAL;
- return __drm_gpuva_insert(mgr, va);
+ return __drm_gpuva_insert(gpuvm, va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_insert);
static void
__drm_gpuva_remove(struct drm_gpuva *va)
{
- drm_gpuva_it_remove(va, &va->mgr->rb.tree);
+ drm_gpuva_it_remove(va, &va->vm->rb.tree);
list_del_init(&va->rb.entry);
}
@@ -786,15 +786,15 @@ __drm_gpuva_remove(struct drm_gpuva *va)
* This removes the given &va from the underlaying tree.
*
* It is safe to use this function using the safe versions of iterating the GPU
- * VA space, such as drm_gpuva_for_each_va_safe() and
- * drm_gpuva_for_each_va_range_safe().
+ * VA space, such as drm_gpuvm_for_each_va_safe() and
+ * drm_gpuvm_for_each_va_range_safe().
*/
void
drm_gpuva_remove(struct drm_gpuva *va)
{
- struct drm_gpuva_manager *mgr = va->mgr;
+ struct drm_gpuvm *gpuvm = va->vm;
- if (unlikely(va == &mgr->kernel_alloc_node)) {
+ if (unlikely(va == &gpuvm->kernel_alloc_node)) {
WARN(1, "Can't destroy kernel reserved node.\n");
return;
}
@@ -853,37 +853,37 @@ EXPORT_SYMBOL_GPL(drm_gpuva_unlink);
/**
* drm_gpuva_find_first() - find the first &drm_gpuva in the given range
- * @mgr: the &drm_gpuva_manager to search in
+ * @gpuvm: the &drm_gpuvm to search in
* @addr: the &drm_gpuvas address
* @range: the &drm_gpuvas range
*
* Returns: the first &drm_gpuva within the given range
*/
struct drm_gpuva *
-drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
+drm_gpuva_find_first(struct drm_gpuvm *gpuvm,
u64 addr, u64 range)
{
u64 last = addr + range - 1;
- return drm_gpuva_it_iter_first(&mgr->rb.tree, addr, last);
+ return drm_gpuva_it_iter_first(&gpuvm->rb.tree, addr, last);
}
EXPORT_SYMBOL_GPL(drm_gpuva_find_first);
/**
* drm_gpuva_find() - find a &drm_gpuva
- * @mgr: the &drm_gpuva_manager to search in
+ * @gpuvm: the &drm_gpuvm to search in
* @addr: the &drm_gpuvas address
* @range: the &drm_gpuvas range
*
* Returns: the &drm_gpuva at a given &addr and with a given &range
*/
struct drm_gpuva *
-drm_gpuva_find(struct drm_gpuva_manager *mgr,
+drm_gpuva_find(struct drm_gpuvm *gpuvm,
u64 addr, u64 range)
{
struct drm_gpuva *va;
- va = drm_gpuva_find_first(mgr, addr, range);
+ va = drm_gpuva_find_first(gpuvm, addr, range);
if (!va)
goto out;
@@ -900,7 +900,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_find);
/**
* drm_gpuva_find_prev() - find the &drm_gpuva before the given address
- * @mgr: the &drm_gpuva_manager to search in
+ * @gpuvm: the &drm_gpuvm to search in
* @start: the given GPU VA's start address
*
* Find the adjacent &drm_gpuva before the GPU VA with given &start address.
@@ -911,18 +911,18 @@ EXPORT_SYMBOL_GPL(drm_gpuva_find);
* Returns: a pointer to the found &drm_gpuva or NULL if none was found
*/
struct drm_gpuva *
-drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start)
+drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start)
{
- if (!drm_gpuva_range_valid(mgr, start - 1, 1))
+ if (!drm_gpuvm_range_valid(gpuvm, start - 1, 1))
return NULL;
- return drm_gpuva_it_iter_first(&mgr->rb.tree, start - 1, start);
+ return drm_gpuva_it_iter_first(&gpuvm->rb.tree, start - 1, start);
}
EXPORT_SYMBOL_GPL(drm_gpuva_find_prev);
/**
* drm_gpuva_find_next() - find the &drm_gpuva after the given address
- * @mgr: the &drm_gpuva_manager to search in
+ * @gpuvm: the &drm_gpuvm to search in
* @end: the given GPU VA's end address
*
* Find the adjacent &drm_gpuva after the GPU VA with given &end address.
@@ -933,47 +933,47 @@ EXPORT_SYMBOL_GPL(drm_gpuva_find_prev);
* Returns: a pointer to the found &drm_gpuva or NULL if none was found
*/
struct drm_gpuva *
-drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end)
+drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end)
{
- if (!drm_gpuva_range_valid(mgr, end, 1))
+ if (!drm_gpuvm_range_valid(gpuvm, end, 1))
return NULL;
- return drm_gpuva_it_iter_first(&mgr->rb.tree, end, end + 1);
+ return drm_gpuva_it_iter_first(&gpuvm->rb.tree, end, end + 1);
}
EXPORT_SYMBOL_GPL(drm_gpuva_find_next);
/**
- * drm_gpuva_interval_empty() - indicate whether a given interval of the VA space
+ * drm_gpuvm_interval_empty() - indicate whether a given interval of the VA space
* is empty
- * @mgr: the &drm_gpuva_manager to check the range for
+ * @gpuvm: the &drm_gpuvm to check the range for
* @addr: the start address of the range
* @range: the range of the interval
*
* Returns: true if the interval is empty, false otherwise
*/
bool
-drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range)
+drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range)
{
- return !drm_gpuva_find_first(mgr, addr, range);
+ return !drm_gpuva_find_first(gpuvm, addr, range);
}
-EXPORT_SYMBOL_GPL(drm_gpuva_interval_empty);
+EXPORT_SYMBOL_GPL(drm_gpuvm_interval_empty);
/**
* drm_gpuva_map() - helper to insert a &drm_gpuva according to a
* &drm_gpuva_op_map
- * @mgr: the &drm_gpuva_manager
+ * @gpuvm: the &drm_gpuvm
* @va: the &drm_gpuva to insert
* @op: the &drm_gpuva_op_map to initialize @va with
*
- * Initializes the @va from the @op and inserts it into the given @mgr.
+ * Initializes the @va from the @op and inserts it into the given @gpuvm.
*/
void
-drm_gpuva_map(struct drm_gpuva_manager *mgr,
+drm_gpuva_map(struct drm_gpuvm *gpuvm,
struct drm_gpuva *va,
struct drm_gpuva_op_map *op)
{
drm_gpuva_init_from_op(va, op);
- drm_gpuva_insert(mgr, va);
+ drm_gpuva_insert(gpuvm, va);
}
EXPORT_SYMBOL_GPL(drm_gpuva_map);
@@ -993,18 +993,18 @@ drm_gpuva_remap(struct drm_gpuva *prev,
struct drm_gpuva_op_remap *op)
{
struct drm_gpuva *curr = op->unmap->va;
- struct drm_gpuva_manager *mgr = curr->mgr;
+ struct drm_gpuvm *gpuvm = curr->vm;
drm_gpuva_remove(curr);
if (op->prev) {
drm_gpuva_init_from_op(prev, op->prev);
- drm_gpuva_insert(mgr, prev);
+ drm_gpuva_insert(gpuvm, prev);
}
if (op->next) {
drm_gpuva_init_from_op(next, op->next);
- drm_gpuva_insert(mgr, next);
+ drm_gpuva_insert(gpuvm, next);
}
}
EXPORT_SYMBOL_GPL(drm_gpuva_remap);
@@ -1024,7 +1024,7 @@ drm_gpuva_unmap(struct drm_gpuva_op_unmap *op)
EXPORT_SYMBOL_GPL(drm_gpuva_unmap);
static int
-op_map_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
+op_map_cb(const struct drm_gpuvm_ops *fn, void *priv,
u64 addr, u64 range,
struct drm_gem_object *obj, u64 offset)
{
@@ -1040,7 +1040,7 @@ op_map_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
}
static int
-op_remap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
+op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv,
struct drm_gpuva_op_map *prev,
struct drm_gpuva_op_map *next,
struct drm_gpuva_op_unmap *unmap)
@@ -1058,7 +1058,7 @@ op_remap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
}
static int
-op_unmap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
+op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv,
struct drm_gpuva *va, bool merge)
{
struct drm_gpuva_op op = {};
@@ -1071,8 +1071,8 @@ op_unmap_cb(const struct drm_gpuva_fn_ops *fn, void *priv,
}
static int
-__drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
- const struct drm_gpuva_fn_ops *ops, void *priv,
+__drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_ops *ops, void *priv,
u64 req_addr, u64 req_range,
struct drm_gem_object *req_obj, u64 req_offset)
{
@@ -1080,10 +1080,10 @@ __drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
u64 req_end = req_addr + req_range;
int ret;
- if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
+ if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
return -EINVAL;
- drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
+ drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
struct drm_gem_object *obj = va->gem.obj;
u64 offset = va->gem.offset;
u64 addr = va->va.addr;
@@ -1213,18 +1213,18 @@ __drm_gpuva_sm_map(struct drm_gpuva_manager *mgr,
}
static int
-__drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr,
- const struct drm_gpuva_fn_ops *ops, void *priv,
+__drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
+ const struct drm_gpuvm_ops *ops, void *priv,
u64 req_addr, u64 req_range)
{
struct drm_gpuva *va, *next;
u64 req_end = req_addr + req_range;
int ret;
- if (unlikely(!drm_gpuva_range_valid(mgr, req_addr, req_range)))
+ if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range)))
return -EINVAL;
- drm_gpuva_for_each_va_range_safe(va, next, mgr, req_addr, req_end) {
+ drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) {
struct drm_gpuva_op_map prev = {}, next = {};
bool prev_split = false, next_split = false;
struct drm_gem_object *obj = va->gem.obj;
@@ -1271,8 +1271,8 @@ __drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr,
}
/**
- * drm_gpuva_sm_map() - creates the &drm_gpuva_op split/merge steps
- * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * drm_gpuvm_sm_map() - creates the &drm_gpuva_op split/merge steps
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
* @req_addr: the start address of the new mapping
* @req_range: the range of the new mapping
* @req_obj: the &drm_gem_object to map
@@ -1280,15 +1280,15 @@ __drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr,
* @priv: pointer to a driver private data structure
*
* This function iterates the given range of the GPU VA space. It utilizes the
- * &drm_gpuva_fn_ops to call back into the driver providing the split and merge
+ * &drm_gpuvm_ops to call back into the driver providing the split and merge
* steps.
*
* Drivers may use these callbacks to update the GPU VA space right away within
* the callback. In case the driver decides to copy and store the operations for
- * later processing neither this function nor &drm_gpuva_sm_unmap is allowed to
- * be called before the &drm_gpuva_manager's view of the GPU VA space was
+ * later processing neither this function nor &drm_gpuvm_sm_unmap is allowed to
+ * be called before the &drm_gpuvm's view of the GPU VA space was
* updated with the previous set of operations. To update the
- * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
+ * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
* drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
* used.
*
@@ -1303,39 +1303,39 @@ __drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr,
* Returns: 0 on success or a negative error code
*/
int
-drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
+drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
u64 req_addr, u64 req_range,
struct drm_gem_object *req_obj, u64 req_offset)
{
- const struct drm_gpuva_fn_ops *ops = mgr->ops;
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
if (unlikely(!(ops && ops->sm_step_map &&
ops->sm_step_remap &&
ops->sm_step_unmap)))
return -EINVAL;
- return __drm_gpuva_sm_map(mgr, ops, priv,
+ return __drm_gpuvm_sm_map(gpuvm, ops, priv,
req_addr, req_range,
req_obj, req_offset);
}
-EXPORT_SYMBOL_GPL(drm_gpuva_sm_map);
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map);
/**
- * drm_gpuva_sm_unmap() - creates the &drm_gpuva_ops to split on unmap
- * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * drm_gpuvm_sm_unmap() - creates the &drm_gpuva_ops to split on unmap
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
* @priv: pointer to a driver private data structure
* @req_addr: the start address of the range to unmap
* @req_range: the range of the mappings to unmap
*
* This function iterates the given range of the GPU VA space. It utilizes the
- * &drm_gpuva_fn_ops to call back into the driver providing the operations to
+ * &drm_gpuvm_ops to call back into the driver providing the operations to
* unmap and, if required, split existent mappings.
*
* Drivers may use these callbacks to update the GPU VA space right away within
* the callback. In case the driver decides to copy and store the operations for
- * later processing neither this function nor &drm_gpuva_sm_map is allowed to be
- * called before the &drm_gpuva_manager's view of the GPU VA space was updated
- * with the previous set of operations. To update the &drm_gpuva_manager's view
+ * later processing neither this function nor &drm_gpuvm_sm_map is allowed to be
+ * called before the &drm_gpuvm's view of the GPU VA space was updated
+ * with the previous set of operations. To update the &drm_gpuvm's view
* of the GPU VA space drm_gpuva_insert(), drm_gpuva_destroy_locked() and/or
* drm_gpuva_destroy_unlocked() should be used.
*
@@ -1348,24 +1348,24 @@ EXPORT_SYMBOL_GPL(drm_gpuva_sm_map);
* Returns: 0 on success or a negative error code
*/
int
-drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
+drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
u64 req_addr, u64 req_range)
{
- const struct drm_gpuva_fn_ops *ops = mgr->ops;
+ const struct drm_gpuvm_ops *ops = gpuvm->ops;
if (unlikely(!(ops && ops->sm_step_remap &&
ops->sm_step_unmap)))
return -EINVAL;
- return __drm_gpuva_sm_unmap(mgr, ops, priv,
+ return __drm_gpuvm_sm_unmap(gpuvm, ops, priv,
req_addr, req_range);
}
-EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap);
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap);
static struct drm_gpuva_op *
-gpuva_op_alloc(struct drm_gpuva_manager *mgr)
+gpuva_op_alloc(struct drm_gpuvm *gpuvm)
{
- const struct drm_gpuva_fn_ops *fn = mgr->ops;
+ const struct drm_gpuvm_ops *fn = gpuvm->ops;
struct drm_gpuva_op *op;
if (fn && fn->op_alloc)
@@ -1380,10 +1380,10 @@ gpuva_op_alloc(struct drm_gpuva_manager *mgr)
}
static void
-gpuva_op_free(struct drm_gpuva_manager *mgr,
+gpuva_op_free(struct drm_gpuvm *gpuvm,
struct drm_gpuva_op *op)
{
- const struct drm_gpuva_fn_ops *fn = mgr->ops;
+ const struct drm_gpuvm_ops *fn = gpuvm->ops;
if (fn && fn->op_free)
fn->op_free(op);
@@ -1396,14 +1396,14 @@ drm_gpuva_sm_step(struct drm_gpuva_op *__op,
void *priv)
{
struct {
- struct drm_gpuva_manager *mgr;
+ struct drm_gpuvm *vm;
struct drm_gpuva_ops *ops;
} *args = priv;
- struct drm_gpuva_manager *mgr = args->mgr;
+ struct drm_gpuvm *gpuvm = args->vm;
struct drm_gpuva_ops *ops = args->ops;
struct drm_gpuva_op *op;
- op = gpuva_op_alloc(mgr);
+ op = gpuva_op_alloc(gpuvm);
if (unlikely(!op))
goto err;
@@ -1442,20 +1442,20 @@ err_free_unmap:
err_free_prev:
kfree(op->remap.prev);
err_free_op:
- gpuva_op_free(mgr, op);
+ gpuva_op_free(gpuvm, op);
err:
return -ENOMEM;
}
-static const struct drm_gpuva_fn_ops gpuva_list_ops = {
+static const struct drm_gpuvm_ops gpuvm_list_ops = {
.sm_step_map = drm_gpuva_sm_step,
.sm_step_remap = drm_gpuva_sm_step,
.sm_step_unmap = drm_gpuva_sm_step,
};
/**
- * drm_gpuva_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
- * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
* @req_addr: the start address of the new mapping
* @req_range: the range of the new mapping
* @req_obj: the &drm_gem_object to map
@@ -1474,9 +1474,9 @@ static const struct drm_gpuva_fn_ops gpuva_list_ops = {
* map operation requested by the caller.
*
* Note that before calling this function again with another mapping request it
- * is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The
+ * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
* previously obtained operations must be either processed or abandoned. To
- * update the &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
+ * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
* drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
* used.
*
@@ -1486,13 +1486,13 @@ static const struct drm_gpuva_fn_ops gpuva_list_ops = {
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
struct drm_gpuva_ops *
-drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm,
u64 req_addr, u64 req_range,
struct drm_gem_object *req_obj, u64 req_offset)
{
struct drm_gpuva_ops *ops;
struct {
- struct drm_gpuva_manager *mgr;
+ struct drm_gpuvm *vm;
struct drm_gpuva_ops *ops;
} args;
int ret;
@@ -1503,10 +1503,10 @@ drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
INIT_LIST_HEAD(&ops->list);
- args.mgr = mgr;
+ args.vm = gpuvm;
args.ops = ops;
- ret = __drm_gpuva_sm_map(mgr, &gpuva_list_ops, &args,
+ ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
req_addr, req_range,
req_obj, req_offset);
if (ret)
@@ -1515,15 +1515,15 @@ drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
return ops;
err_free_ops:
- drm_gpuva_ops_free(mgr, ops);
+ drm_gpuva_ops_free(gpuvm, ops);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_gpuva_sm_map_ops_create);
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create);
/**
- * drm_gpuva_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
+ * drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on
* unmap
- * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
* @req_addr: the start address of the range to unmap
* @req_range: the range of the mappings to unmap
*
@@ -1538,9 +1538,9 @@ EXPORT_SYMBOL_GPL(drm_gpuva_sm_map_ops_create);
* remap operations.
*
* Note that before calling this function again with another range to unmap it
- * is necessary to update the &drm_gpuva_manager's view of the GPU VA space. The
+ * is necessary to update the &drm_gpuvm's view of the GPU VA space. The
* previously obtained operations must be processed or abandoned. To update the
- * &drm_gpuva_manager's view of the GPU VA space drm_gpuva_insert(),
+ * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert(),
* drm_gpuva_destroy_locked() and/or drm_gpuva_destroy_unlocked() should be
* used.
*
@@ -1550,12 +1550,12 @@ EXPORT_SYMBOL_GPL(drm_gpuva_sm_map_ops_create);
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
struct drm_gpuva_ops *
-drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm,
u64 req_addr, u64 req_range)
{
struct drm_gpuva_ops *ops;
struct {
- struct drm_gpuva_manager *mgr;
+ struct drm_gpuvm *vm;
struct drm_gpuva_ops *ops;
} args;
int ret;
@@ -1566,10 +1566,10 @@ drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
INIT_LIST_HEAD(&ops->list);
- args.mgr = mgr;
+ args.vm = gpuvm;
args.ops = ops;
- ret = __drm_gpuva_sm_unmap(mgr, &gpuva_list_ops, &args,
+ ret = __drm_gpuvm_sm_unmap(gpuvm, &gpuvm_list_ops, &args,
req_addr, req_range);
if (ret)
goto err_free_ops;
@@ -1577,14 +1577,14 @@ drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
return ops;
err_free_ops:
- drm_gpuva_ops_free(mgr, ops);
+ drm_gpuva_ops_free(gpuvm, ops);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap_ops_create);
+EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_ops_create);
/**
- * drm_gpuva_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch
- * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * drm_gpuvm_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
* @addr: the start address of the range to prefetch
* @range: the range of the mappings to prefetch
*
@@ -1601,7 +1601,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_sm_unmap_ops_create);
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
struct drm_gpuva_ops *
-drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm,
u64 addr, u64 range)
{
struct drm_gpuva_ops *ops;
@@ -1616,8 +1616,8 @@ drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
INIT_LIST_HEAD(&ops->list);
- drm_gpuva_for_each_va_range(va, mgr, addr, end) {
- op = gpuva_op_alloc(mgr);
+ drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) {
+ op = gpuva_op_alloc(gpuvm);
if (!op) {
ret = -ENOMEM;
goto err_free_ops;
@@ -1631,14 +1631,14 @@ drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
return ops;
err_free_ops:
- drm_gpuva_ops_free(mgr, ops);
+ drm_gpuva_ops_free(gpuvm, ops);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_gpuva_prefetch_ops_create);
+EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create);
/**
- * drm_gpuva_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
- * @mgr: the &drm_gpuva_manager representing the GPU VA space
+ * drm_gpuvm_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM
+ * @gpuvm: the &drm_gpuvm representing the GPU VA space
* @obj: the &drm_gem_object to unmap
*
* This function creates a list of operations to perform unmapping for every
@@ -1656,7 +1656,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_prefetch_ops_create);
* Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure
*/
struct drm_gpuva_ops *
-drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
+drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm,
struct drm_gem_object *obj)
{
struct drm_gpuva_ops *ops;
@@ -1673,7 +1673,7 @@ drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
INIT_LIST_HEAD(&ops->list);
drm_gem_for_each_gpuva(va, obj) {
- op = gpuva_op_alloc(mgr);
+ op = gpuva_op_alloc(gpuvm);
if (!op) {
ret = -ENOMEM;
goto err_free_ops;
@@ -1687,21 +1687,21 @@ drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
return ops;
err_free_ops:
- drm_gpuva_ops_free(mgr, ops);
+ drm_gpuva_ops_free(gpuvm, ops);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(drm_gpuva_gem_unmap_ops_create);
+EXPORT_SYMBOL_GPL(drm_gpuvm_gem_unmap_ops_create);
/**
* drm_gpuva_ops_free() - free the given &drm_gpuva_ops
- * @mgr: the &drm_gpuva_manager the ops were created for
+ * @gpuvm: the &drm_gpuvm the ops were created for
* @ops: the &drm_gpuva_ops to free
*
* Frees the given &drm_gpuva_ops structure including all the ops associated
* with it.
*/
void
-drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
+drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
struct drm_gpuva_ops *ops)
{
struct drm_gpuva_op *op, *next;
@@ -1715,9 +1715,12 @@ drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
kfree(op->remap.unmap);
}
- gpuva_op_free(mgr, op);
+ gpuva_op_free(gpuvm, op);
}
kfree(ops);
}
EXPORT_SYMBOL_GPL(drm_gpuva_ops_free);
+
+MODULE_DESCRIPTION("DRM GPUVM");
+MODULE_LICENSE("GPL");
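For readers skimming the rename above: the kernel-doc keeps the same contract, i.e. an ops list is created against the current &drm_gpuvm view of the GPU VA space, processed (or abandoned), and only then freed before the next request. The snippet below is a minimal usage sketch, not part of this patch; it assumes the renamed <drm/drm_gpuvm.h> header and a hypothetical driver_apply_op() step.

#include <linux/err.h>
#include <drm/drm_gpuvm.h>

/* Hedged sketch only: a real driver programs page tables per step and keeps
 * the gpuvm view in sync via drm_gpuva_insert() / drm_gpuva_destroy_locked()
 * / drm_gpuva_destroy_unlocked(), as the kernel-doc above requires. */
static int driver_apply_op(struct drm_gpuvm *gpuvm, struct drm_gpuva_op *op)
{
	return 0; /* placeholder */
}

static int driver_bind_mapping(struct drm_gpuvm *gpuvm, u64 addr, u64 range,
			       struct drm_gem_object *obj, u64 offset)
{
	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;
	int ret = 0;

	/* Split/merge steps computed against the current GPU VA view. */
	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	drm_gpuva_for_each_op(op, ops) {
		ret = driver_apply_op(gpuvm, op);
		if (ret)
			break;
	}

	/* Ops must be processed or abandoned before issuing another request. */
	drm_gpuva_ops_free(gpuvm, ops);
	return ret;
}

Unmap requests follow the same shape via drm_gpuvm_sm_unmap_ops_create(), and the prefetch/GEM-unmap variants return the same &drm_gpuva_ops list type.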
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index f03ffbacfe9b..77590b0f38fa 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -776,6 +776,9 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
struct drm_device *dev = file_priv->minor->dev;
int retcode;
+ /* Update drm_file owner if fd was passed along. */
+ drm_file_update_pid(file_priv);
+
if (drm_dev_is_unplugged(dev))
return -ENODEV;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index a395f93449f3..ab6c0c6cd0e2 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -356,9 +356,17 @@ static void fsl_dcu_drm_remove(struct platform_device *pdev)
clk_unregister(fsl_dev->pix_clk);
}
+static void fsl_dcu_drm_shutdown(struct platform_device *pdev)
+{
+ struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(fsl_dev->drm);
+}
+
static struct platform_driver fsl_dcu_drm_platform_driver = {
.probe = fsl_dcu_drm_probe,
.remove_new = fsl_dcu_drm_remove,
+ .shutdown = fsl_dcu_drm_shutdown,
.driver = {
.name = "fsl-dcu",
.pm = &fsl_dcu_drm_pm_ops,
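Several of the driver hunks that follow (hibmc, kirin, hyperv, dcss, imx, ingenic, logicvc, loongson, mcde, mgag200, ...) add the same kind of hook as the fsl-dcu change above: a .shutdown callback that hands the drm_device to drm_atomic_helper_shutdown(). As a rough, non-authoritative sketch of that shape, with "foo" as a placeholder name rather than a driver from this diff:

#include <linux/platform_device.h>
#include <drm/drm_atomic_helper.h>

/* Illustrative only; foo_* symbols are stand-ins for a real driver. */
static int foo_probe(struct platform_device *pdev)
{
	return 0; /* stub */
}

static void foo_remove(struct platform_device *pdev)
{
	/* stub */
}

static void foo_platform_shutdown(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);

	/* Quiesce the display pipeline on reboot/poweroff. */
	drm_atomic_helper_shutdown(drm);
}

static struct platform_driver foo_platform_driver = {
	.probe		= foo_probe,
	.remove_new	= foo_remove,
	.shutdown	= foo_platform_shutdown,
	.driver		= {
		.name = "foo-drm",
	},
};

PCI-based drivers do the equivalent with pci_get_drvdata() in the .shutdown member of struct pci_driver, as the hibmc and lsdc hunks below show.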
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 8a98fa276e8a..57c21ec452b7 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -357,6 +357,11 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
hibmc_unload(dev);
}
+static void hibmc_pci_shutdown(struct pci_dev *pdev)
+{
+ drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
+}
+
static const struct pci_device_id hibmc_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, 0x1711) },
{0,}
@@ -367,6 +372,7 @@ static struct pci_driver hibmc_pci_driver = {
.id_table = hibmc_pci_table,
.probe = hibmc_pci_probe,
.remove = hibmc_pci_remove,
+ .shutdown = hibmc_pci_shutdown,
.driver.pm = &hibmc_pm_ops,
};
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index e8c77bcc6dae..75292a2f4644 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -206,6 +206,7 @@ err_mode_config_cleanup:
static int kirin_drm_kms_cleanup(struct drm_device *dev)
{
drm_kms_helper_poll_fini(dev);
+ drm_atomic_helper_shutdown(dev);
kirin_drm_private_cleanup(dev);
drm_mode_config_cleanup(dev);
@@ -244,6 +245,7 @@ err_kms_cleanup:
kirin_drm_kms_cleanup(drm_dev);
err_drm_dev_put:
drm_dev_put(drm_dev);
+ dev_set_drvdata(dev, NULL);
return ret;
}
@@ -255,6 +257,7 @@ static void kirin_drm_unbind(struct device *dev)
drm_dev_unregister(drm_dev);
kirin_drm_kms_cleanup(drm_dev);
drm_dev_put(drm_dev);
+ dev_set_drvdata(dev, NULL);
}
static const struct component_master_ops kirin_drm_ops = {
@@ -284,6 +287,11 @@ static void kirin_drm_platform_remove(struct platform_device *pdev)
component_master_del(&pdev->dev, &kirin_drm_ops);
}
+static void kirin_drm_platform_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
static const struct of_device_id kirin_drm_dt_ids[] = {
{ .compatible = "hisilicon,hi6220-ade",
.data = &ade_driver_data,
@@ -295,6 +303,7 @@ MODULE_DEVICE_TABLE(of, kirin_drm_dt_ids);
static struct platform_driver kirin_drm_platform_driver = {
.probe = kirin_drm_platform_probe,
.remove_new = kirin_drm_platform_remove,
+ .shutdown = kirin_drm_platform_shutdown,
.driver = {
.name = "kirin-drm",
.of_match_table = kirin_drm_dt_ids,
diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
index 8026118c6e03..58b0b46a21e6 100644
--- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
+++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c
@@ -178,6 +178,11 @@ static void hyperv_vmbus_remove(struct hv_device *hdev)
vmbus_free_mmio(hv->mem->start, hv->fb_size);
}
+static void hyperv_vmbus_shutdown(struct hv_device *hdev)
+{
+ drm_atomic_helper_shutdown(hv_get_drvdata(hdev));
+}
+
static int hyperv_vmbus_suspend(struct hv_device *hdev)
{
struct drm_device *dev = hv_get_drvdata(hdev);
@@ -220,6 +225,7 @@ static struct hv_driver hyperv_hv_driver = {
.id_table = hyperv_vmbus_tbl,
.probe = hyperv_vmbus_probe,
.remove = hyperv_vmbus_remove,
+ .shutdown = hyperv_vmbus_shutdown,
.suspend = hyperv_vmbus_suspend,
.resume = hyperv_vmbus_resume,
.driver = {
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index a1bc804cfa15..0d735d5c2b35 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -59,6 +59,9 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
if (place->flags & TTM_PL_FLAG_TOPDOWN)
bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
+ bman_res->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+
if (place->fpfn || lpfn != man->size)
bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;
@@ -72,18 +75,6 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
GEM_BUG_ON(min_page_size < mm->chunk_size);
GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));
- if (place->fpfn + PFN_UP(bman_res->base.size) != place->lpfn &&
- place->flags & TTM_PL_FLAG_CONTIGUOUS) {
- unsigned long pages;
-
- size = roundup_pow_of_two(size);
- min_page_size = size;
-
- pages = size >> ilog2(mm->chunk_size);
- if (pages > lpfn)
- lpfn = pages;
- }
-
if (size > lpfn << PAGE_SHIFT) {
err = -E2BIG;
goto err_free_res;
@@ -107,14 +98,6 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
if (unlikely(err))
goto err_free_blocks;
- if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
- u64 original_size = (u64)bman_res->base.size;
-
- drm_buddy_block_trim(mm,
- original_size,
- &bman_res->blocks);
- }
-
if (lpfn <= bman->visible_size) {
bman_res->used_visible_size = PFN_UP(bman_res->base.size);
} else {
diff --git a/drivers/gpu/drm/imx/dcss/dcss-drv.c b/drivers/gpu/drm/imx/dcss/dcss-drv.c
index c68b0d93ae9e..b61cec0cc79d 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-drv.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-drv.c
@@ -92,6 +92,13 @@ static int dcss_drv_platform_remove(struct platform_device *pdev)
return 0;
}
+static void dcss_drv_platform_shutdown(struct platform_device *pdev)
+{
+ struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev);
+
+ dcss_kms_shutdown(mdrv->kms);
+}
+
static struct dcss_type_data dcss_types[] = {
[DCSS_IMX8MQ] = {
.name = "DCSS_IMX8MQ",
@@ -114,6 +121,7 @@ MODULE_DEVICE_TABLE(of, dcss_of_match);
static struct platform_driver dcss_platform_driver = {
.probe = dcss_drv_platform_probe,
.remove = dcss_drv_platform_remove,
+ .shutdown = dcss_drv_platform_shutdown,
.driver = {
.name = "imx-dcss",
.of_match_table = dcss_of_match,
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index 896de946f8df..d0ea4e97cded 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -172,3 +172,10 @@ void dcss_kms_detach(struct dcss_kms_dev *kms)
dcss_crtc_deinit(&kms->crtc, drm);
drm->dev_private = NULL;
}
+
+void dcss_kms_shutdown(struct dcss_kms_dev *kms)
+{
+ struct drm_device *drm = &kms->base;
+
+ drm_atomic_helper_shutdown(drm);
+}
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.h b/drivers/gpu/drm/imx/dcss/dcss-kms.h
index dfe5dd99eea3..62521c1fd6d2 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.h
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.h
@@ -34,6 +34,7 @@ struct dcss_kms_dev {
struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss);
void dcss_kms_detach(struct dcss_kms_dev *kms);
+void dcss_kms_shutdown(struct dcss_kms_dev *kms);
int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm);
void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm);
struct dcss_plane *dcss_plane_init(struct drm_device *drm,
diff --git a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
index 352fa31ab4ed..4cfabcf7375a 100644
--- a/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/ipuv3/imx-drm-core.c
@@ -257,6 +257,7 @@ err_poll_fini:
drm_kms_helper_poll_fini(drm);
component_unbind_all(drm->dev, drm);
err_kms:
+ dev_set_drvdata(dev, NULL);
drm_dev_put(drm);
return ret;
@@ -269,6 +270,7 @@ static void imx_drm_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
component_unbind_all(drm->dev, drm);
@@ -297,6 +299,11 @@ static void imx_drm_platform_remove(struct platform_device *pdev)
component_master_del(&pdev->dev, &imx_drm_ops);
}
+static void imx_drm_platform_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
#ifdef CONFIG_PM_SLEEP
static int imx_drm_suspend(struct device *dev)
{
@@ -324,6 +331,7 @@ MODULE_DEVICE_TABLE(of, imx_drm_dt_ids);
static struct platform_driver imx_drm_pdrv = {
.probe = imx_drm_platform_probe,
.remove_new = imx_drm_platform_remove,
+ .shutdown = imx_drm_platform_shutdown,
.driver = {
.name = "imx-drm",
.pm = &imx_drm_pm_ops,
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
index c2547d48d6aa..0751235007a7 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c
@@ -1130,7 +1130,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
ret = drmm_mode_config_init(drm);
if (ret)
- return ret;
+ goto err_drvdata;
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
@@ -1142,7 +1142,8 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base)) {
dev_err(dev, "Failed to get memory resource\n");
- return PTR_ERR(base);
+ ret = PTR_ERR(base);
+ goto err_drvdata;
}
regmap_config = ingenic_drm_regmap_config;
@@ -1151,33 +1152,40 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
&regmap_config);
if (IS_ERR(priv->map)) {
dev_err(dev, "Failed to create regmap\n");
- return PTR_ERR(priv->map);
+ ret = PTR_ERR(priv->map);
+ goto err_drvdata;
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ if (irq < 0) {
+ ret = irq;
+ goto err_drvdata;
+ }
if (soc_info->needs_dev_clk) {
priv->lcd_clk = devm_clk_get(dev, "lcd");
if (IS_ERR(priv->lcd_clk)) {
dev_err(dev, "Failed to get lcd clock\n");
- return PTR_ERR(priv->lcd_clk);
+ ret = PTR_ERR(priv->lcd_clk);
+ goto err_drvdata;
}
}
priv->pix_clk = devm_clk_get(dev, "lcd_pclk");
if (IS_ERR(priv->pix_clk)) {
dev_err(dev, "Failed to get pixel clock\n");
- return PTR_ERR(priv->pix_clk);
+ ret = PTR_ERR(priv->pix_clk);
+ goto err_drvdata;
}
priv->dma_hwdescs = dmam_alloc_coherent(dev,
sizeof(*priv->dma_hwdescs),
&priv->dma_hwdescs_phys,
GFP_KERNEL);
- if (!priv->dma_hwdescs)
- return -ENOMEM;
+ if (!priv->dma_hwdescs) {
+ ret = -ENOMEM;
+ goto err_drvdata;
+ }
/* Configure DMA hwdesc for foreground0 plane */
ingenic_drm_configure_hwdesc_plane(priv, 0);
@@ -1199,7 +1207,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
dev_err(dev, "Failed to register plane: %i\n", ret);
- return ret;
+ goto err_drvdata;
}
if (soc_info->map_noncoherent)
@@ -1211,7 +1219,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
NULL, &ingenic_drm_crtc_funcs, NULL);
if (ret) {
dev_err(dev, "Failed to init CRTC: %i\n", ret);
- return ret;
+ goto err_drvdata;
}
drm_crtc_enable_color_mgmt(&priv->crtc, 0, false,
@@ -1230,7 +1238,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
if (ret) {
dev_err(dev, "Failed to register overlay plane: %i\n",
ret);
- return ret;
+ goto err_drvdata;
}
if (soc_info->map_noncoherent)
@@ -1241,17 +1249,18 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
if (ret) {
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to bind components: %i\n", ret);
- return ret;
+ goto err_drvdata;
}
ret = devm_add_action_or_reset(dev, ingenic_drm_unbind_all, priv);
if (ret)
- return ret;
+ goto err_drvdata;
priv->ipu_plane = drm_plane_from_index(drm, 2);
if (!priv->ipu_plane) {
dev_err(dev, "Failed to retrieve IPU plane\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_drvdata;
}
}
}
@@ -1263,7 +1272,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
break; /* we're done */
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get bridge handle\n");
- return ret;
+ goto err_drvdata;
}
if (panel)
@@ -1275,7 +1284,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
if (IS_ERR(ib)) {
ret = PTR_ERR(ib);
dev_err(dev, "Failed to init encoder: %d\n", ret);
- return ret;
+ goto err_drvdata;
}
encoder = &ib->encoder;
@@ -1290,13 +1299,14 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
dev_err(dev, "Unable to attach bridge\n");
- return ret;
+ goto err_drvdata;
}
connector = drm_bridge_connector_init(drm, encoder);
if (IS_ERR(connector)) {
dev_err(dev, "Unable to init connector\n");
- return PTR_ERR(connector);
+ ret = PTR_ERR(connector);
+ goto err_drvdata;
}
drm_connector_attach_encoder(connector, encoder);
@@ -1313,13 +1323,13 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
ret = devm_request_irq(dev, irq, ingenic_drm_irq_handler, 0, drm->driver->name, drm);
if (ret) {
dev_err(dev, "Unable to install IRQ handler\n");
- return ret;
+ goto err_drvdata;
}
ret = drm_vblank_init(drm, 1);
if (ret) {
dev_err(dev, "Failed calling drm_vblank_init()\n");
- return ret;
+ goto err_drvdata;
}
drm_mode_config_reset(drm);
@@ -1327,7 +1337,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
ret = clk_prepare_enable(priv->pix_clk);
if (ret) {
dev_err(dev, "Unable to start pixel clock\n");
- return ret;
+ goto err_drvdata;
}
if (priv->lcd_clk) {
@@ -1402,6 +1412,8 @@ err_devclk_disable:
clk_disable_unprepare(priv->lcd_clk);
err_pixclk_disable:
clk_disable_unprepare(priv->pix_clk);
+err_drvdata:
+ platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -1422,6 +1434,7 @@ static void ingenic_drm_unbind(struct device *dev)
drm_dev_unregister(&priv->drm);
drm_atomic_helper_shutdown(&priv->drm);
+ dev_set_drvdata(dev, NULL);
}
static const struct component_master_ops ingenic_master_ops = {
@@ -1459,6 +1472,14 @@ static void ingenic_drm_remove(struct platform_device *pdev)
component_master_del(dev, &ingenic_master_ops);
}
+static void ingenic_drm_shutdown(struct platform_device *pdev)
+{
+ struct ingenic_drm *priv = platform_get_drvdata(pdev);
+
+ if (priv)
+ drm_atomic_helper_shutdown(&priv->drm);
+}
+
static int ingenic_drm_suspend(struct device *dev)
{
struct ingenic_drm *priv = dev_get_drvdata(dev);
@@ -1610,6 +1631,7 @@ static struct platform_driver ingenic_drm_driver = {
},
.probe = ingenic_drm_probe,
.remove_new = ingenic_drm_remove,
+ .shutdown = ingenic_drm_shutdown,
};
static int ingenic_drm_init(void)
diff --git a/drivers/gpu/drm/logicvc/logicvc_drm.c b/drivers/gpu/drm/logicvc/logicvc_drm.c
index 749debd3d6a5..01a37e28c080 100644
--- a/drivers/gpu/drm/logicvc/logicvc_drm.c
+++ b/drivers/gpu/drm/logicvc/logicvc_drm.c
@@ -482,6 +482,14 @@ static void logicvc_drm_remove(struct platform_device *pdev)
of_reserved_mem_device_release(dev);
}
+static void logicvc_drm_shutdown(struct platform_device *pdev)
+{
+ struct logicvc_drm *logicvc = platform_get_drvdata(pdev);
+ struct drm_device *drm_dev = &logicvc->drm_dev;
+
+ drm_atomic_helper_shutdown(drm_dev);
+}
+
static const struct of_device_id logicvc_drm_of_table[] = {
{ .compatible = "xylon,logicvc-3.02.a-display" },
{ .compatible = "xylon,logicvc-4.01.a-display" },
@@ -492,6 +500,7 @@ MODULE_DEVICE_TABLE(of, logicvc_drm_of_table);
static struct platform_driver logicvc_drm_platform_driver = {
.probe = logicvc_drm_probe,
.remove_new = logicvc_drm_remove,
+ .shutdown = logicvc_drm_shutdown,
.driver = {
.name = "logicvc-drm",
.of_match_table = logicvc_drm_of_table,
diff --git a/drivers/gpu/drm/loongson/lsdc_drv.c b/drivers/gpu/drm/loongson/lsdc_drv.c
index 188ec82afcfb..89ccc0c43169 100644
--- a/drivers/gpu/drm/loongson/lsdc_drv.c
+++ b/drivers/gpu/drm/loongson/lsdc_drv.c
@@ -327,6 +327,11 @@ static void lsdc_pci_remove(struct pci_dev *pdev)
drm_atomic_helper_shutdown(ddev);
}
+static void lsdc_pci_shutdown(struct pci_dev *pdev)
+{
+ drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
+}
+
static int lsdc_drm_freeze(struct drm_device *ddev)
{
struct lsdc_device *ldev = to_lsdc(ddev);
@@ -447,6 +452,7 @@ struct pci_driver lsdc_pci_driver = {
.id_table = lsdc_pciid_list,
.probe = lsdc_pci_probe,
.remove = lsdc_pci_remove,
+ .shutdown = lsdc_pci_shutdown,
.driver.pm = &lsdc_pm_ops,
};
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index a2572fb311f0..10c06440c7e7 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -459,6 +459,14 @@ static void mcde_remove(struct platform_device *pdev)
regulator_disable(mcde->epod);
}
+static void mcde_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ if (drm->registered)
+ drm_atomic_helper_shutdown(drm);
+}
+
static const struct of_device_id mcde_of_match[] = {
{
.compatible = "ste,mcde",
@@ -473,6 +481,7 @@ static struct platform_driver mcde_driver = {
},
.probe = mcde_probe,
.remove_new = mcde_remove,
+ .shutdown = mcde_shutdown,
};
static struct platform_driver *const component_drivers[] = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index abddf37f0ea1..2fb18b782b05 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -10,6 +10,7 @@
#include <linux/pci.h>
#include <drm/drm_aperture.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
@@ -278,6 +279,12 @@ static void mgag200_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
+ drm_atomic_helper_shutdown(dev);
+}
+
+static void mgag200_pci_shutdown(struct pci_dev *pdev)
+{
+ drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
}
static struct pci_driver mgag200_pci_driver = {
@@ -285,6 +292,7 @@ static struct pci_driver mgag200_pci_driver = {
.id_table = mgag200_pciidlist,
.probe = mgag200_pci_probe,
.remove = mgag200_pci_remove,
+ .shutdown = mgag200_pci_shutdown,
};
drm_module_pci_driver_if_modeset(mgag200_pci_driver, mgag200_modeset);
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index c52e8096cca4..1e6aaf95ff7c 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -11,6 +11,7 @@ config DRM_NOUVEAU
select DRM_TTM
select DRM_TTM_HELPER
select DRM_EXEC
+ select DRM_GPUVM
select DRM_SCHED
select I2C
select I2C_ALGOBIT
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index e9ac3fb27ff7..13705c5f1497 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -256,7 +256,7 @@ nv04_display_create(struct drm_device *dev)
for (i = 0; i < dcb->entries; i++) {
struct dcb_output *dcbent = &dcb->entry[i];
- connector = nouveau_connector_create(dev, dcbent);
+ connector = nouveau_connector_create(dev, dcbent->connector);
if (IS_ERR(connector))
continue;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index bba01fa0780c..52f1569ee37c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -66,8 +66,6 @@
#include "nouveau_fence.h"
#include "nv50_display.h"
-#include <subdev/bios/dp.h>
-
/******************************************************************************
* EVO channel
*****************************************************************************/
@@ -477,7 +475,6 @@ nv50_dac_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *st
core->func->dac->ctrl(core, nv_encoder->outp.or.id, ctrl, NULL);
nv_encoder->crtc = NULL;
- nvif_outp_release(&nv_encoder->outp);
}
static void
@@ -502,7 +499,8 @@ nv50_dac_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, PROTOCOL, RGB_CRT);
- nvif_outp_acquire_rgb_crt(&nv_encoder->outp);
+ if (!nvif_outp_acquired(&nv_encoder->outp))
+ nvif_outp_acquire_dac(&nv_encoder->outp);
core->func->dac->ctrl(core, nv_encoder->outp.or.id, ctrl, asyh);
asyh->or.depth = 0;
@@ -553,34 +551,27 @@ nv50_dac_func = {
};
static int
-nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
+nv50_dac_create(struct nouveau_encoder *nv_encoder)
{
+ struct drm_connector *connector = &nv_encoder->conn->base;
struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nv50_disp *disp = nv50_disp(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus;
- struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
+ struct dcb_output *dcbe = nv_encoder->dcb;
int type = DRM_MODE_ENCODER_DAC;
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- nv_encoder->dcb = dcbe;
-
bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
if (bus)
nv_encoder->i2c = &bus->i2c;
encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
"dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_dac_help);
drm_connector_attach_encoder(connector, encoder);
- return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
+ return 0;
}
/*
@@ -617,7 +608,7 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
continue; /* TODO */
nv_encoder = nouveau_encoder(encoder);
- nv_connector = nouveau_connector(nv_encoder->audio.connector);
+ nv_connector = nv_encoder->conn;
nv_crtc = nouveau_crtc(nv_encoder->crtc);
if (!nv_crtc || nv_encoder->outp.or.id != port || nv_crtc->index != dev_id)
@@ -713,6 +704,18 @@ nv50_audio_supported(struct drm_encoder *encoder)
disp->disp->object.oclass == GT206_DISP)
return false;
+ if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ switch (nv_encoder->dcb->type) {
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_DP:
+ break;
+ default:
+ return false;
+ }
+ }
+
return true;
}
@@ -729,7 +732,6 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
mutex_lock(&drm->audio.lock);
if (nv_encoder->audio.enabled) {
nv_encoder->audio.enabled = false;
- nv_encoder->audio.connector = NULL;
nvif_outp_hda_eld(&nv_encoder->outp, nv_crtc->index, NULL, 0);
}
mutex_unlock(&drm->audio.lock);
@@ -754,7 +756,6 @@ nv50_audio_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
nvif_outp_hda_eld(&nv_encoder->outp, nv_crtc->index, nv_connector->base.eld,
drm_eld_size(nv_connector->base.eld));
nv_encoder->audio.enabled = true;
- nv_encoder->audio.connector = &nv_connector->base;
mutex_unlock(&drm->audio.lock);
@@ -774,7 +775,6 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
struct drm_hdmi_info *hdmi = &nv_connector->base.display_info.hdmi;
union hdmi_infoframe infoframe = { 0 };
const u8 rekey = 56; /* binary driver, and tegra, constant */
- u8 scdc = 0;
u32 max_ac_packet;
struct {
struct nvif_outp_infoframe_v0 infoframe;
@@ -787,8 +787,9 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
max_ac_packet -= 18; /* constant from tegra */
max_ac_packet /= 32;
- if (hdmi->scdc.scrambling.supported) {
+ if (nv_encoder->i2c && hdmi->scdc.scrambling.supported) {
const bool high_tmds_clock_ratio = mode->clock > 340000;
+ u8 scdc;
ret = drm_scdc_readb(nv_encoder->i2c, SCDC_TMDS_CONFIG, &scdc);
if (ret < 0) {
@@ -808,8 +809,9 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
scdc, ret);
}
- ret = nvif_outp_acquire_tmds(&nv_encoder->outp, nv_crtc->index, true,
- max_ac_packet, rekey, scdc, hda);
+ ret = nvif_outp_hdmi(&nv_encoder->outp, nv_crtc->index, true, max_ac_packet, rekey,
+ mode->clock, hdmi->scdc.supported, hdmi->scdc.scrambling.supported,
+ hdmi->scdc.scrambling.low_rates);
if (ret)
return;
@@ -838,7 +840,7 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, &args.infoframe, size);
- nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
+ nv_encoder->hdmi.enabled = true;
}
/******************************************************************************
@@ -865,6 +867,8 @@ struct nv50_msto {
struct nv50_mstc *mstc;
bool disabled;
bool enabled;
+
+ u32 display_id;
};
struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
@@ -893,10 +897,17 @@ nv50_msto_cleanup(struct drm_atomic_state *state,
drm_atomic_get_old_mst_topology_state(state, mgr);
const struct drm_dp_mst_atomic_payload *old_payload =
drm_atomic_get_mst_payload_state(old_mst_state, msto->mstc->port);
+ struct nv50_mstc *mstc = msto->mstc;
+ struct nv50_mstm *mstm = mstc->mstm;
NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
if (msto->disabled) {
+ if (msto->head->func->display_id) {
+ nvif_outp_dp_mst_id_put(&mstm->outp->outp, msto->display_id);
+ msto->display_id = 0;
+ }
+
msto->mstc = NULL;
msto->disabled = false;
drm_dp_remove_payload_part2(mgr, new_mst_state, old_payload, new_payload);
@@ -916,23 +927,27 @@ nv50_msto_prepare(struct drm_atomic_state *state,
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
struct drm_dp_mst_atomic_payload *payload;
+ int ret = 0;
NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
- // TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
if (msto->disabled) {
drm_dp_remove_payload_part1(mgr, mst_state, payload);
-
nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
+ ret = 1;
} else {
if (msto->enabled)
- drm_dp_add_payload_part1(mgr, mst_state, payload);
+ ret = drm_dp_add_payload_part1(mgr, mst_state, payload);
+ }
+ if (ret == 0) {
nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index,
payload->vc_start_slot, payload->time_slots,
payload->pbn, payload->time_slots * mst_state->pbn_div);
+ } else {
+ nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
}
}
@@ -1029,8 +1044,13 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
return;
if (!mstm->links++) {
- /*XXX: MST audio. */
- nvif_outp_acquire_dp(&mstm->outp->outp, mstm->outp->dp.dpcd, 0, 0, false, true);
+ nvif_outp_acquire_sor(&mstm->outp->outp, false /*TODO: MST audio... */);
+ nouveau_dp_train(mstm->outp, true, 0, 0);
+ }
+
+ if (head->func->display_id) {
+ if (!WARN_ON(nvif_outp_dp_mst_id_get(&mstm->outp->outp, &msto->display_id)))
+ head->func->display_id(head, msto->display_id);
}
if (mstm->outp->outp.or.link & 1)
@@ -1053,6 +1073,9 @@ nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *s
struct nv50_mstc *mstc = msto->mstc;
struct nv50_mstm *mstm = mstc->mstm;
+ if (msto->head->func->display_id)
+ msto->head->func->display_id(msto->head, 0);
+
mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
mstm->modified = true;
if (!--mstm->links)
@@ -1291,6 +1314,12 @@ nv50_mstm_cleanup(struct drm_atomic_state *state,
}
}
+ if (mstm->disabled) {
+ nouveau_dp_power_down(mstm->outp);
+ nvif_outp_release(&mstm->outp->outp);
+ mstm->disabled = false;
+ }
+
mstm->modified = false;
}
@@ -1325,12 +1354,6 @@ nv50_mstm_prepare(struct drm_atomic_state *state,
nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
}
}
-
- if (mstm->disabled) {
- if (!mstm->links)
- nvif_outp_release(&mstm->outp->outp);
- mstm->disabled = false;
- }
}
static struct drm_connector *
@@ -1536,7 +1559,7 @@ static void
nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+ struct nv50_head *head = nv50_head(nv_encoder->crtc);
struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder);
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
@@ -1544,7 +1567,6 @@ nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *st
#endif
struct drm_dp_aux *aux = &nv_connector->aux;
int ret;
- u8 pwr;
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
if (backlight && backlight->uses_dpcd) {
@@ -1555,19 +1577,20 @@ nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *st
}
#endif
- if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
- ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
-
- if (ret == 0) {
- pwr &= ~DP_SET_POWER_MASK;
- pwr |= DP_SET_POWER_D3;
- drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
- }
+ if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS && nv_encoder->hdmi.enabled) {
+ nvif_outp_hdmi(&nv_encoder->outp, head->base.index,
+ false, 0, 0, 0, false, false, false);
+ nv_encoder->hdmi.enabled = false;
}
- nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
- nv50_audio_disable(encoder, nv_crtc);
- nvif_outp_release(&nv_encoder->outp);
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+ nouveau_dp_power_down(nv_encoder);
+
+ if (head->func->display_id)
+ head->func->display_id(head, 0);
+
+ nv_encoder->update(nv_encoder, head->base.index, NULL, 0, 0);
+ nv50_audio_disable(encoder, &head->base);
nv_encoder->crtc = NULL;
}
@@ -1580,6 +1603,7 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
struct nv50_disp *disp = nv50_disp(encoder->dev);
+ struct nv50_head *head = nv50_head(&nv_crtc->base);
struct nvif_outp *outp = &nv_encoder->outp;
struct drm_device *dev = encoder->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -1597,15 +1621,17 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
if ((disp->disp->object.oclass == GT214_DISP ||
disp->disp->object.oclass >= GF110_DISP) &&
+ nv_encoder->dcb->type != DCB_OUTPUT_LVDS &&
drm_detect_monitor_audio(nv_connector->edid))
hda = true;
+ if (!nvif_outp_acquired(outp))
+ nvif_outp_acquire_sor(outp, hda);
+
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_TMDS:
- if (disp->disp->object.oclass == NV50_DISP ||
- !drm_detect_hdmi_monitor(nv_connector->edid))
- nvif_outp_acquire_tmds(outp, nv_crtc->index, false, 0, 0, 0, false);
- else
+ if (disp->disp->object.oclass != NV50_DISP &&
+ drm_detect_hdmi_monitor(nv_connector->edid))
nv50_hdmi_enable(encoder, nv_crtc, nv_connector, state, mode, hda);
if (nv_encoder->outp.or.link & 1) {
@@ -1651,10 +1677,10 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
lvds_8bpc = true;
}
- nvif_outp_acquire_lvds(&nv_encoder->outp, lvds_dual, lvds_8bpc);
+ nvif_outp_lvds(&nv_encoder->outp, lvds_dual, lvds_8bpc);
break;
case DCB_OUTPUT_DP:
- nvif_outp_acquire_dp(&nv_encoder->outp, nv_encoder->dp.dpcd, 0, 0, hda, false);
+ nouveau_dp_train(nv_encoder, false, mode->clock, asyh->or.bpc);
depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
if (nv_encoder->outp.or.link & 1)
@@ -1662,8 +1688,6 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
else
proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;
- nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
-
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
backlight = nv_connector->backlight;
if (backlight && backlight->uses_dpcd)
@@ -1677,6 +1701,9 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
break;
}
+ if (head->func->display_id)
+ head->func->display_id(head, BIT(nv_encoder->outp.id));
+
nv_encoder->update(nv_encoder, nv_crtc->index, asyh, proto, depth);
}
@@ -1692,14 +1719,13 @@ nv50_sor_destroy(struct drm_encoder *encoder)
{
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- nvif_outp_dtor(&nv_encoder->outp);
-
nv50_mstm_del(&nv_encoder->dp.mstm);
drm_encoder_cleanup(encoder);
if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
mutex_destroy(&nv_encoder->dp.hpd_irq_lock);
+ nvif_outp_dtor(&nv_encoder->outp);
kfree(encoder);
}
@@ -1708,24 +1734,15 @@ nv50_sor_func = {
.destroy = nv50_sor_destroy,
};
-bool nv50_has_mst(struct nouveau_drm *drm)
-{
- struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
- u32 data;
- u8 ver, hdr, cnt, len;
-
- data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
- return data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04);
-}
-
static int
-nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
+nv50_sor_create(struct nouveau_encoder *nv_encoder)
{
+ struct drm_connector *connector = &nv_encoder->conn->base;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
- struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
+ struct dcb_output *dcbe = nv_encoder->dcb;
struct nv50_disp *disp = nv50_disp(connector->dev);
int type, ret;
@@ -1738,15 +1755,9 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
break;
}
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- nv_encoder->dcb = dcbe;
nv_encoder->update = nv50_sor_update;
encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
"sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_sor_help);
@@ -1757,40 +1768,40 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
nv50_outp_dump_caps(drm, nv_encoder);
if (dcbe->type == DCB_OUTPUT_DP) {
- struct nvkm_i2c_aux *aux =
- nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
-
mutex_init(&nv_encoder->dp.hpd_irq_lock);
- if (aux) {
- if (disp->disp->object.oclass < GF110_DISP) {
- /* HW has no support for address-only
- * transactions, so we're required to
- * use custom I2C-over-AUX code.
- */
- nv_encoder->i2c = &aux->i2c;
- } else {
- nv_encoder->i2c = &nv_connector->aux.ddc;
- }
- nv_encoder->aux = aux;
+ if (disp->disp->object.oclass < GF110_DISP) {
+ /* HW has no support for address-only
+ * transactions, so we're required to
+ * use custom I2C-over-AUX code.
+ */
+ struct nvkm_i2c_aux *aux;
+
+ aux = nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
+ if (!aux)
+ return -EINVAL;
+
+ nv_encoder->i2c = &aux->i2c;
+ } else {
+ nv_encoder->i2c = &nv_connector->aux.ddc;
}
- if (nv_connector->type != DCB_CONNECTOR_eDP &&
- nv50_has_mst(drm)) {
+ if (nv_connector->type != DCB_CONNECTOR_eDP && nv_encoder->outp.info.dp.mst) {
ret = nv50_mstm_new(nv_encoder, &nv_connector->aux,
16, nv_connector->base.base.id,
&nv_encoder->dp.mstm);
if (ret)
return ret;
}
- } else {
+ } else
+ if (nv_encoder->outp.info.ddc != NVIF_OUTP_DDC_INVALID) {
struct nvkm_i2c_bus *bus =
nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
if (bus)
nv_encoder->i2c = &bus->i2c;
}
- return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
+ return 0;
}
/******************************************************************************
@@ -1817,7 +1828,6 @@ nv50_pior_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *s
core->func->pior->ctrl(core, nv_encoder->outp.or.id, ctrl, NULL);
nv_encoder->crtc = NULL;
- nvif_outp_release(&nv_encoder->outp);
}
static void
@@ -1845,14 +1855,16 @@ nv50_pior_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
default: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT; break;
}
+ if (!nvif_outp_acquired(&nv_encoder->outp))
+ nvif_outp_acquire_pior(&nv_encoder->outp);
+
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_TMDS:
ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
- nvif_outp_acquire_tmds(&nv_encoder->outp, false, false, 0, 0, 0, false);
break;
case DCB_OUTPUT_DP:
ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
- nvif_outp_acquire_dp(&nv_encoder->outp, nv_encoder->dp.dpcd, 0, 0, false, false);
+ nouveau_dp_train(nv_encoder, false, asyh->state.adjusted_mode.clock, 6);
break;
default:
BUG();
@@ -1889,8 +1901,9 @@ nv50_pior_func = {
};
static int
-nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
+nv50_pior_create(struct nouveau_encoder *nv_encoder)
{
+ struct drm_connector *connector = &nv_encoder->conn->base;
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv50_disp *disp = nv50_disp(dev);
@@ -1898,18 +1911,18 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
struct nvkm_i2c_bus *bus = NULL;
struct nvkm_i2c_aux *aux = NULL;
struct i2c_adapter *ddc;
- struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
+ struct dcb_output *dcbe = nv_encoder->dcb;
int type;
switch (dcbe->type) {
case DCB_OUTPUT_TMDS:
- bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
+ bus = nvkm_i2c_bus_find(i2c, nv_encoder->outp.info.ddc);
ddc = bus ? &bus->i2c : NULL;
type = DRM_MODE_ENCODER_TMDS;
break;
case DCB_OUTPUT_DP:
- aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
+ aux = nvkm_i2c_aux_find(i2c, nv_encoder->outp.info.dp.aux);
ddc = aux ? &aux->i2c : NULL;
type = DRM_MODE_ENCODER_TMDS;
break;
@@ -1917,18 +1930,11 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
return -ENODEV;
}
- nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
- if (!nv_encoder)
- return -ENOMEM;
- nv_encoder->dcb = dcbe;
nv_encoder->i2c = ddc;
- nv_encoder->aux = aux;
mutex_init(&nv_encoder->dp.hpd_irq_lock);
encoder = to_drm_encoder(nv_encoder);
- encoder->possible_crtcs = dcbe->heads;
- encoder->possible_clones = 0;
drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
"pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
drm_encoder_helper_add(encoder, &nv50_pior_help);
@@ -1938,7 +1944,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
nv50_outp_dump_caps(drm, nv_encoder);
- return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
+ return 0;
}
/******************************************************************************
@@ -1952,7 +1958,9 @@ nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
struct drm_dp_mst_topology_state *mst_state;
struct nouveau_drm *drm = nouveau_drm(state->dev);
struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nv50_atom *atom = nv50_atom(state);
struct nv50_core *core = disp->core;
+ struct nv50_outp_atom *outp;
struct nv50_mstm *mstm;
int i;
@@ -1975,6 +1983,23 @@ nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
if (mstm->modified)
nv50_mstm_cleanup(state, mst_state, mstm);
}
+
+ list_for_each_entry(outp, &atom->outp, head) {
+ if (outp->encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(outp->encoder);
+
+ if (outp->enabled) {
+ nv50_audio_enable(outp->encoder, nouveau_crtc(nv_encoder->crtc),
+ nv_encoder->conn, NULL, NULL);
+ outp->enabled = outp->disabled = false;
+ } else {
+ if (outp->disabled) {
+ nvif_outp_release(&nv_encoder->outp);
+ outp->disabled = false;
+ }
+ }
+ }
+ }
}
static void
@@ -2066,14 +2091,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
if (outp->clr.mask) {
help->atomic_disable(encoder, state);
+ outp->disabled = true;
interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
- if (outp->flush_disable) {
- nv50_disp_atomic_commit_wndw(state, interlock);
- nv50_disp_atomic_commit_core(state, interlock);
- memset(interlock, 0x00, sizeof(interlock));
-
- flushed = true;
- }
}
}
@@ -2093,7 +2112,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
nv50_crc_atomic_init_notifier_contexts(state);
/* Update output path(s). */
- list_for_each_entry_safe(outp, outt, &atom->outp, head) {
+ list_for_each_entry(outp, &atom->outp, head) {
const struct drm_encoder_helper_funcs *help;
struct drm_encoder *encoder;
@@ -2105,11 +2124,9 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
if (outp->set.mask) {
help->atomic_enable(encoder, state);
+ outp->enabled = true;
interlock[NV50_DISP_INTERLOCK_CORE] = 1;
}
-
- list_del(&outp->head);
- kfree(outp);
}
/* Update head(s). */
@@ -2207,6 +2224,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
if (atom->lock_core)
mutex_unlock(&disp->mutex);
+ list_for_each_entry_safe(outp, outt, &atom->outp, head) {
+ list_del(&outp->head);
+ kfree(outp);
+ }
+
/* Wait for HW to signal completion. */
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
@@ -2355,10 +2377,9 @@ nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
if (IS_ERR(outp))
return PTR_ERR(outp);
- if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
- outp->flush_disable = true;
+ if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST ||
+ nouveau_encoder(outp->encoder)->dcb->type == DCB_OUTPUT_DP)
atom->flush_disable = true;
- }
outp->clr.ctrl = true;
atom->lock_core = true;
}
@@ -2519,6 +2540,104 @@ nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
cancel_work_sync(&drm->hpd_work);
}
+static inline void
+nv50_display_read_hw_or_state(struct drm_device *dev, struct nv50_disp *disp,
+ struct nouveau_encoder *outp)
+{
+ struct drm_crtc *crtc;
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *conn;
+ struct nv50_head_atom *armh;
+ const u32 encoder_mask = drm_encoder_mask(&outp->base.base);
+ bool found_conn = false, found_head = false;
+ u8 proto;
+ int head_idx;
+ int ret;
+
+ switch (outp->dcb->type) {
+ case DCB_OUTPUT_TMDS:
+ ret = nvif_outp_inherit_tmds(&outp->outp, &proto);
+ break;
+ case DCB_OUTPUT_DP:
+ ret = nvif_outp_inherit_dp(&outp->outp, &proto);
+ break;
+ case DCB_OUTPUT_LVDS:
+ ret = nvif_outp_inherit_lvds(&outp->outp, &proto);
+ break;
+ case DCB_OUTPUT_ANALOG:
+ ret = nvif_outp_inherit_rgb_crt(&outp->outp, &proto);
+ break;
+ default:
+ drm_dbg_kms(dev, "Readback for %s not implemented yet, skipping\n",
+ outp->base.base.name);
+ drm_WARN_ON(dev, true);
+ return;
+ }
+
+ if (ret < 0)
+ return;
+
+ head_idx = ret;
+
+ drm_for_each_crtc(crtc, dev) {
+ if (crtc->index != head_idx)
+ continue;
+
+ armh = nv50_head_atom(crtc->state);
+ found_head = true;
+ break;
+ }
+ if (drm_WARN_ON(dev, !found_head))
+ return;
+
+ /* Figure out which connector is being used by this encoder */
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(conn, &conn_iter) {
+ if (nouveau_connector(conn)->index == outp->dcb->connector) {
+ found_conn = true;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ if (drm_WARN_ON(dev, !found_conn))
+ return;
+
+ armh->state.encoder_mask = encoder_mask;
+ armh->state.connector_mask = drm_connector_mask(conn);
+ armh->state.active = true;
+ armh->state.enable = true;
+ pm_runtime_get_noresume(dev->dev);
+
+ outp->crtc = crtc;
+ outp->ctrl = NVVAL(NV507D, SOR_SET_CONTROL, PROTOCOL, proto) | BIT(crtc->index);
+
+ drm_connector_get(conn);
+ conn->state->crtc = crtc;
+ conn->state->best_encoder = &outp->base.base;
+}
+
+/* Read back the currently programmed display state */
+static void
+nv50_display_read_hw_state(struct nouveau_drm *drm)
+{
+ struct drm_device *dev = drm->dev;
+ struct drm_encoder *encoder;
+ struct drm_modeset_acquire_ctx ctx;
+ struct nv50_disp *disp = nv50_disp(dev);
+ int ret;
+
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
+
+ drm_for_each_encoder(encoder, dev) {
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST)
+ continue;
+
+ nv50_display_read_hw_or_state(dev, disp, nouveau_encoder(encoder));
+ }
+
+ DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
+}
+
static int
nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
{
@@ -2536,6 +2655,9 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
}
}
+ if (!resume)
+ nv50_display_read_hw_state(nouveau_drm(dev));
+
return 0;
}
@@ -2562,14 +2684,11 @@ nv50_display_destroy(struct drm_device *dev)
int
nv50_display_create(struct drm_device *dev)
{
- struct nvif_device *device = &nouveau_drm(dev)->client.device;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_table *dcb = &drm->vbios.dcb;
struct drm_connector *connector, *tmp;
struct nv50_disp *disp;
- struct dcb_output *dcbe;
- int crtcs, ret, i;
- bool has_mst = nv50_has_mst(drm);
+ int ret, i;
+ bool has_mst = false;
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
if (!disp)
@@ -2645,20 +2764,92 @@ nv50_display_create(struct drm_device *dev)
dev->mode_config.cursor_height = 64;
}
- /* create crtc objects to represent the hw heads */
- if (disp->disp->object.oclass >= GV100_DISP)
- crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
- else
- if (disp->disp->object.oclass >= GF110_DISP)
- crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
- else
- crtcs = 0x3;
+ /* create encoder/connector objects based on VBIOS DCB table */
+ for_each_set_bit(i, &disp->disp->outp_mask, sizeof(disp->disp->outp_mask) * 8) {
+ struct nouveau_encoder *outp;
- for (i = 0; i < fls(crtcs); i++) {
- struct nv50_head *head;
+ outp = kzalloc(sizeof(*outp), GFP_KERNEL);
+ if (!outp)
+ break;
- if (!(crtcs & (1 << i)))
+ ret = nvif_outp_ctor(disp->disp, "kmsOutp", i, &outp->outp);
+ if (ret) {
+ kfree(outp);
continue;
+ }
+
+ connector = nouveau_connector_create(dev, outp->outp.info.conn);
+ if (IS_ERR(connector)) {
+ nvif_outp_dtor(&outp->outp);
+ kfree(outp);
+ continue;
+ }
+
+ outp->base.base.possible_crtcs = outp->outp.info.heads;
+ outp->base.base.possible_clones = 0;
+ outp->conn = nouveau_connector(connector);
+
+ outp->dcb = kzalloc(sizeof(*outp->dcb), GFP_KERNEL);
+ if (!outp->dcb)
+ break;
+
+ switch (outp->outp.info.proto) {
+ case NVIF_OUTP_RGB_CRT:
+ outp->dcb->type = DCB_OUTPUT_ANALOG;
+ outp->dcb->crtconf.maxfreq = outp->outp.info.rgb_crt.freq_max;
+ break;
+ case NVIF_OUTP_TMDS:
+ outp->dcb->type = DCB_OUTPUT_TMDS;
+ outp->dcb->duallink_possible = outp->outp.info.tmds.dual;
+ break;
+ case NVIF_OUTP_LVDS:
+ outp->dcb->type = DCB_OUTPUT_LVDS;
+ outp->dcb->lvdsconf.use_acpi_for_edid = outp->outp.info.lvds.acpi_edid;
+ break;
+ case NVIF_OUTP_DP:
+ outp->dcb->type = DCB_OUTPUT_DP;
+ outp->dcb->dpconf.link_nr = outp->outp.info.dp.link_nr;
+ outp->dcb->dpconf.link_bw = outp->outp.info.dp.link_bw;
+ if (outp->outp.info.dp.mst)
+ has_mst = true;
+ break;
+ default:
+ WARN_ON(1);
+ continue;
+ }
+
+ outp->dcb->heads = outp->outp.info.heads;
+ outp->dcb->connector = outp->outp.info.conn;
+ outp->dcb->i2c_index = outp->outp.info.ddc;
+
+ switch (outp->outp.info.type) {
+ case NVIF_OUTP_DAC : ret = nv50_dac_create(outp); break;
+ case NVIF_OUTP_SOR : ret = nv50_sor_create(outp); break;
+ case NVIF_OUTP_PIOR: ret = nv50_pior_create(outp); break;
+ default:
+ WARN_ON(1);
+ continue;
+ }
+
+ if (ret) {
+ NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
+ i, outp->outp.info.type, outp->outp.info.proto, ret);
+ }
+ }
+
+ /* cull any connectors we created that don't have an encoder */
+ list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
+ if (connector->possible_encoders)
+ continue;
+
+ NV_WARN(drm, "%s has no encoders, removing\n",
+ connector->name);
+ connector->funcs->destroy(connector);
+ }
+
+ /* create crtc objects to represent the hw heads */
+ for_each_set_bit(i, &disp->disp->head_mask, sizeof(disp->disp->head_mask) * 8) {
+ struct nv50_head *head;
head = nv50_head_create(dev, i);
if (IS_ERR(head)) {
@@ -2684,52 +2875,10 @@ nv50_display_create(struct drm_device *dev)
* Once these issues are closed, this should be
* removed
*/
- head->msto->encoder.possible_crtcs = crtcs;
- }
- }
-
- /* create encoder/connector objects based on VBIOS DCB table */
- for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
- connector = nouveau_connector_create(dev, dcbe);
- if (IS_ERR(connector))
- continue;
-
- if (dcbe->location == DCB_LOC_ON_CHIP) {
- switch (dcbe->type) {
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- case DCB_OUTPUT_DP:
- ret = nv50_sor_create(connector, dcbe);
- break;
- case DCB_OUTPUT_ANALOG:
- ret = nv50_dac_create(connector, dcbe);
- break;
- default:
- ret = -ENODEV;
- break;
- }
- } else {
- ret = nv50_pior_create(connector, dcbe);
- }
-
- if (ret) {
- NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
- dcbe->location, dcbe->type,
- ffs(dcbe->or) - 1, ret);
- ret = 0;
+ head->msto->encoder.possible_crtcs = disp->disp->head_mask;
}
}
- /* cull any connectors we created that don't have an encoder */
- list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
- if (connector->possible_encoders)
- continue;
-
- NV_WARN(drm, "%s has no encoders, removing\n",
- connector->name);
- connector->funcs->destroy(connector);
- }
-
/* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
dev->vblank_disable_immediate = true;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index 9d66c9c726c3..5508a7cfd492 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -83,7 +83,9 @@ struct nv50_outp_atom {
struct list_head head;
struct drm_encoder *encoder;
- bool flush_disable;
+
+ bool disabled;
+ bool enabled;
union nv50_outp_atom_mask {
struct {
@@ -106,8 +108,6 @@ void nv50_dmac_destroy(struct nv50_dmac *);
*/
struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder);
-bool nv50_has_mst(struct nouveau_drm *drm);
-
u32 *evo_wait(struct nv50_dmac *, int nr);
void evo_kick(u32 *, struct nv50_dmac *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.h b/drivers/gpu/drm/nouveau/dispnv50/head.h
index 41c8788dfb31..e9d17037ffcf 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.h
@@ -49,6 +49,7 @@ struct nv50_head_func {
int (*procamp)(struct nv50_head *, struct nv50_head_atom *);
int (*or)(struct nv50_head *, struct nv50_head_atom *);
void (*static_wndw_map)(struct nv50_head *, struct nv50_head_atom *);
+ int (*display_id)(struct nv50_head *, u32 display_id);
};
extern const struct nv50_head_func head507d;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index 543f08ceaad6..53b1248c40ec 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -28,6 +28,19 @@
#include <nvhw/class/clc57d.h>
static int
+headc57d_display_id(struct nv50_head *head, u32 display_id)
+{
+ struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
+ int ret;
+
+ if ((ret = PUSH_WAIT(push, 2)))
+ return ret;
+
+ PUSH_NVSQ(push, NVC57D, 0x2020 + (head->base.index * 0x400), display_id);
+ return 0;
+}
+
+static int
headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
@@ -250,4 +263,5 @@ headc57d = {
.or = headc57d_or,
/* TODO: flexible window mappings */
.static_wndw_map = headc37d_static_wndw_map,
+ .display_id = headc57d_display_id,
};
diff --git a/drivers/gpu/drm/nouveau/include/nvif/conn.h b/drivers/gpu/drm/nouveau/include/nvif/conn.h
index dc355e1dfafa..406c12a111f9 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/conn.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/conn.h
@@ -7,6 +7,21 @@ struct nvif_disp;
struct nvif_conn {
struct nvif_object object;
+ u32 id;
+
+ struct {
+ enum {
+ NVIF_CONN_VGA,
+ NVIF_CONN_TV,
+ NVIF_CONN_DVI_I,
+ NVIF_CONN_DVI_D,
+ NVIF_CONN_LVDS,
+ NVIF_CONN_LVDS_SPWG,
+ NVIF_CONN_HDMI,
+ NVIF_CONN_DP,
+ NVIF_CONN_EDP,
+ } type;
+ } info;
};
int nvif_conn_ctor(struct nvif_disp *, const char *name, int id, struct nvif_conn *);
@@ -18,11 +33,6 @@ nvif_conn_id(struct nvif_conn *conn)
return conn->object.handle;
}
-#define NVIF_CONN_HPD_STATUS_UNSUPPORTED 0 /* negative if query fails */
-#define NVIF_CONN_HPD_STATUS_NOT_PRESENT 1
-#define NVIF_CONN_HPD_STATUS_PRESENT 2
-int nvif_conn_hpd_status(struct nvif_conn *);
-
int nvif_conn_event_ctor(struct nvif_conn *, const char *name, nvif_event_func, u8 types,
struct nvif_event *);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0011.h b/drivers/gpu/drm/nouveau/include/nvif/if0011.h
index 69b0b779f942..3ed0ddd75bd8 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0011.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0011.h
@@ -7,6 +7,16 @@ union nvif_conn_args {
__u8 version;
__u8 id; /* DCB connector table index. */
__u8 pad02[6];
+#define NVIF_CONN_V0_VGA 0x00
+#define NVIF_CONN_V0_TV 0x01
+#define NVIF_CONN_V0_DVI_I 0x02
+#define NVIF_CONN_V0_DVI_D 0x03
+#define NVIF_CONN_V0_LVDS 0x04
+#define NVIF_CONN_V0_LVDS_SPWG 0x05
+#define NVIF_CONN_V0_HDMI 0x06
+#define NVIF_CONN_V0_DP 0x07
+#define NVIF_CONN_V0_EDP 0x08
+ __u8 type;
} v0;
};
@@ -20,15 +30,4 @@ union nvif_conn_event_args {
__u8 pad02[6];
} v0;
};
-
-#define NVIF_CONN_V0_HPD_STATUS 0x00000000
-
-union nvif_conn_hpd_status_args {
- struct nvif_conn_hpd_status_v0 {
- __u8 version;
- __u8 support;
- __u8 present;
- __u8 pad03[5];
- } v0;
-};
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvif/if0012.h b/drivers/gpu/drm/nouveau/include/nvif/if0012.h
index 16d4ad5023a3..bde9bfae8d11 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/if0012.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/if0012.h
@@ -8,18 +8,86 @@ union nvif_outp_args {
struct nvif_outp_v0 {
__u8 version;
__u8 id; /* DCB device index. */
- __u8 pad02[6];
+#define NVIF_OUTP_V0_TYPE_DAC 0x00
+#define NVIF_OUTP_V0_TYPE_SOR 0x01
+#define NVIF_OUTP_V0_TYPE_PIOR 0x02
+ __u8 type;
+#define NVIF_OUTP_V0_PROTO_RGB_CRT 0x00
+#define NVIF_OUTP_V0_PROTO_TMDS 0x01
+#define NVIF_OUTP_V0_PROTO_LVDS 0x02
+#define NVIF_OUTP_V0_PROTO_DP 0x03
+ __u8 proto;
+ __u8 heads;
+ __u8 ddc;
+ __u8 conn;
+ union {
+ struct {
+ __u32 freq_max;
+ } rgb_crt;
+ struct {
+ __u8 dual;
+ } tmds;
+ struct {
+ __u8 acpi_edid;
+ } lvds;
+ struct {
+ __u8 aux;
+ __u8 mst;
+ __u8 increased_wm;
+ __u8 link_nr;
+ __u32 link_bw;
+ } dp;
+ };
} v0;
};
-#define NVIF_OUTP_V0_LOAD_DETECT 0x00
-#define NVIF_OUTP_V0_ACQUIRE 0x01
-#define NVIF_OUTP_V0_RELEASE 0x02
-#define NVIF_OUTP_V0_INFOFRAME 0x03
-#define NVIF_OUTP_V0_HDA_ELD 0x04
-#define NVIF_OUTP_V0_DP_AUX_PWR 0x05
-#define NVIF_OUTP_V0_DP_RETRAIN 0x06
-#define NVIF_OUTP_V0_DP_MST_VCPI 0x07
+#define NVIF_OUTP_V0_DETECT 0x00
+#define NVIF_OUTP_V0_EDID_GET 0x01
+
+#define NVIF_OUTP_V0_INHERIT 0x10
+#define NVIF_OUTP_V0_ACQUIRE 0x11
+#define NVIF_OUTP_V0_RELEASE 0x12
+
+#define NVIF_OUTP_V0_LOAD_DETECT 0x20
+
+#define NVIF_OUTP_V0_BL_GET 0x30
+#define NVIF_OUTP_V0_BL_SET 0x31
+
+#define NVIF_OUTP_V0_LVDS 0x40
+
+#define NVIF_OUTP_V0_HDMI 0x50
+
+#define NVIF_OUTP_V0_INFOFRAME 0x60
+#define NVIF_OUTP_V0_HDA_ELD 0x61
+
+#define NVIF_OUTP_V0_DP_AUX_PWR 0x70
+#define NVIF_OUTP_V0_DP_AUX_XFER 0x71
+#define NVIF_OUTP_V0_DP_RATES 0x72
+#define NVIF_OUTP_V0_DP_TRAIN 0x73
+#define NVIF_OUTP_V0_DP_DRIVE 0x74
+#define NVIF_OUTP_V0_DP_SST 0x75
+#define NVIF_OUTP_V0_DP_MST_ID_GET 0x76
+#define NVIF_OUTP_V0_DP_MST_ID_PUT 0x77
+#define NVIF_OUTP_V0_DP_MST_VCPI 0x78
+
+union nvif_outp_detect_args {
+ struct nvif_outp_detect_v0 {
+ __u8 version;
+#define NVIF_OUTP_DETECT_V0_NOT_PRESENT 0x00
+#define NVIF_OUTP_DETECT_V0_PRESENT 0x01
+#define NVIF_OUTP_DETECT_V0_UNKNOWN 0x02
+ __u8 status;
+ } v0;
+};
+
+union nvif_outp_edid_get_args {
+ struct nvif_outp_edid_get_v0 {
+ __u8 version;
+ __u8 pad01;
+ __u16 size;
+ __u8 data[2048];
+ } v0;
+};
union nvif_outp_load_detect_args {
struct nvif_outp_load_detect_v0 {
@@ -33,40 +101,39 @@ union nvif_outp_load_detect_args {
union nvif_outp_acquire_args {
struct nvif_outp_acquire_v0 {
__u8 version;
-#define NVIF_OUTP_ACQUIRE_V0_RGB_CRT 0x00
-#define NVIF_OUTP_ACQUIRE_V0_TV 0x01
-#define NVIF_OUTP_ACQUIRE_V0_TMDS 0x02
-#define NVIF_OUTP_ACQUIRE_V0_LVDS 0x03
-#define NVIF_OUTP_ACQUIRE_V0_DP 0x04
- __u8 proto;
+#define NVIF_OUTP_ACQUIRE_V0_DAC 0x00
+#define NVIF_OUTP_ACQUIRE_V0_SOR 0x01
+#define NVIF_OUTP_ACQUIRE_V0_PIOR 0x02
+ __u8 type;
__u8 or;
__u8 link;
__u8 pad04[4];
union {
struct {
- __u8 head;
- __u8 hdmi;
- __u8 hdmi_max_ac_packet;
- __u8 hdmi_rekey;
-#define NVIF_OUTP_ACQUIRE_V0_TMDS_HDMI_SCDC_SCRAMBLE (1 << 0)
-#define NVIF_OUTP_ACQUIRE_V0_TMDS_HDMI_SCDC_DIV_BY_4 (1 << 1)
- __u8 hdmi_scdc;
- __u8 hdmi_hda;
- __u8 pad06[2];
- } tmds;
- struct {
- __u8 dual;
- __u8 bpc8;
- __u8 pad02[6];
- } lvds;
+ __u8 hda;
+ } sor;
+ };
+ } v0;
+};
+
+union nvif_outp_inherit_args {
+ struct nvif_outp_inherit_v0 {
+ __u8 version;
+#define NVIF_OUTP_INHERIT_V0_RGB_CRT 0x00
+#define NVIF_OUTP_INHERIT_V0_TV 0x01
+#define NVIF_OUTP_INHERIT_V0_TMDS 0x02
+#define NVIF_OUTP_INHERIT_V0_LVDS 0x03
+#define NVIF_OUTP_INHERIT_V0_DP 0x04
+ // In/out. Input is one of the above values, output is the actual hw protocol
+ __u8 proto;
+ __u8 or;
+ __u8 link;
+ __u8 head;
+ union {
struct {
- __u8 link_nr; /* 0 = highest possible. */
- __u8 link_bw; /* 0 = highest possible, DP BW code otherwise. */
+ // TODO: Figure out padding, and whether we even want this field
__u8 hda;
- __u8 mst;
- __u8 pad04[4];
- __u8 dpcd[DP_RECEIVER_CAP_SIZE];
- } dp;
+ } tmds;
};
} v0;
};
@@ -76,6 +143,42 @@ union nvif_outp_release_args {
} vn;
};
+union nvif_outp_bl_get_args {
+ struct nvif_outp_bl_get_v0 {
+ __u8 version;
+ __u8 level;
+ } v0;
+};
+
+union nvif_outp_bl_set_args {
+ struct nvif_outp_bl_set_v0 {
+ __u8 version;
+ __u8 level;
+ } v0;
+};
+
+union nvif_outp_lvds_args {
+ struct nvif_outp_lvds_v0 {
+ __u8 version;
+ __u8 dual;
+ __u8 bpc8;
+ } v0;
+};
+
+union nvif_outp_hdmi_args {
+ struct nvif_outp_hdmi_v0 {
+ __u8 version;
+ __u8 head;
+ __u8 enable;
+ __u8 max_ac_packet;
+ __u8 rekey;
+ __u8 scdc;
+ __u8 scdc_scrambling;
+ __u8 scdc_low_rates;
+ __u32 khz;
+ } v0;
+};
+
union nvif_outp_infoframe_args {
struct nvif_outp_infoframe_v0 {
__u8 version;
@@ -105,9 +208,77 @@ union nvif_outp_dp_aux_pwr_args {
} v0;
};
-union nvif_outp_dp_retrain_args {
- struct nvif_outp_dp_retrain_vn {
- } vn;
+union nvif_outp_dp_aux_xfer_args {
+ struct nvif_outp_dp_aux_xfer_v0 {
+ __u8 version;
+ __u8 pad01;
+ __u8 type;
+ __u8 size;
+ __u32 addr;
+ __u8 data[16];
+ } v0;
+};
+
+union nvif_outp_dp_rates_args {
+ struct nvif_outp_dp_rates_v0 {
+ __u8 version;
+ __u8 pad01[6];
+ __u8 rates;
+ struct {
+ __s8 dpcd;
+ __u32 rate;
+ } rate[8];
+ } v0;
+};
+
+union nvif_outp_dp_train_args {
+ struct nvif_outp_dp_train_v0 {
+ __u8 version;
+ __u8 retrain;
+ __u8 mst;
+ __u8 lttprs;
+ __u8 post_lt_adj;
+ __u8 link_nr;
+ __u32 link_bw;
+ __u8 dpcd[DP_RECEIVER_CAP_SIZE];
+ } v0;
+};
+
+union nvif_outp_dp_drive_args {
+ struct nvif_outp_dp_drive_v0 {
+ __u8 version;
+ __u8 pad01[2];
+ __u8 lanes;
+ __u8 pe[4];
+ __u8 vs[4];
+ } v0;
+};
+
+union nvif_outp_dp_sst_args {
+ struct nvif_outp_dp_sst_v0 {
+ __u8 version;
+ __u8 head;
+ __u8 pad02[2];
+ __u32 watermark;
+ __u32 hblanksym;
+ __u32 vblanksym;
+ } v0;
+};
+
+union nvif_outp_dp_mst_id_put_args {
+ struct nvif_outp_dp_mst_id_put_v0 {
+ __u8 version;
+ __u8 pad01[3];
+ __u32 id;
+ } v0;
+};
+
+union nvif_outp_dp_mst_id_get_args {
+ struct nvif_outp_dp_mst_id_get_v0 {
+ __u8 version;
+ __u8 pad01[3];
+ __u32 id;
+ } v0;
};
union nvif_outp_dp_mst_vcpi_args {
diff --git a/drivers/gpu/drm/nouveau/include/nvif/outp.h b/drivers/gpu/drm/nouveau/include/nvif/outp.h
index fa76a7b5e4b3..bc122a5ba7df 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/outp.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/outp.h
@@ -8,6 +8,46 @@ struct nvif_disp;
struct nvif_outp {
struct nvif_object object;
+ u32 id;
+
+ struct {
+ enum {
+ NVIF_OUTP_DAC,
+ NVIF_OUTP_SOR,
+ NVIF_OUTP_PIOR,
+ } type;
+
+ enum {
+ NVIF_OUTP_RGB_CRT,
+ NVIF_OUTP_TMDS,
+ NVIF_OUTP_LVDS,
+ NVIF_OUTP_DP,
+ } proto;
+
+ u8 heads;
+#define NVIF_OUTP_DDC_INVALID 0xff
+ u8 ddc;
+ u8 conn;
+
+ union {
+ struct {
+ u32 freq_max;
+ } rgb_crt;
+ struct {
+ bool dual;
+ } tmds;
+ struct {
+ bool acpi_edid;
+ } lvds;
+ struct {
+ u8 aux;
+ bool mst;
+ bool increased_wm;
+ u8 link_nr;
+ u32 link_bw;
+ } dp;
+ };
+ } info;
struct {
int id;
@@ -17,18 +57,60 @@ struct nvif_outp {
int nvif_outp_ctor(struct nvif_disp *, const char *name, int id, struct nvif_outp *);
void nvif_outp_dtor(struct nvif_outp *);
+
+enum nvif_outp_detect_status {
+ NOT_PRESENT,
+ PRESENT,
+ UNKNOWN,
+};
+
+enum nvif_outp_detect_status nvif_outp_detect(struct nvif_outp *);
+int nvif_outp_edid_get(struct nvif_outp *, u8 **pedid);
+
int nvif_outp_load_detect(struct nvif_outp *, u32 loadval);
-int nvif_outp_acquire_rgb_crt(struct nvif_outp *);
-int nvif_outp_acquire_tmds(struct nvif_outp *, int head,
- bool hdmi, u8 max_ac_packet, u8 rekey, u8 scdc, bool hda);
-int nvif_outp_acquire_lvds(struct nvif_outp *, bool dual, bool bpc8);
-int nvif_outp_acquire_dp(struct nvif_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE],
- int link_nr, int link_bw, bool hda, bool mst);
+int nvif_outp_acquire_dac(struct nvif_outp *);
+int nvif_outp_acquire_sor(struct nvif_outp *, bool hda);
+int nvif_outp_acquire_pior(struct nvif_outp *);
+int nvif_outp_inherit_rgb_crt(struct nvif_outp *outp, u8 *proto_out);
+int nvif_outp_inherit_lvds(struct nvif_outp *outp, u8 *proto_out);
+int nvif_outp_inherit_tmds(struct nvif_outp *outp, u8 *proto_out);
+int nvif_outp_inherit_dp(struct nvif_outp *outp, u8 *proto_out);
+
void nvif_outp_release(struct nvif_outp *);
+
+static inline bool
+nvif_outp_acquired(struct nvif_outp *outp)
+{
+ return outp->or.id >= 0;
+}
+
+int nvif_outp_bl_get(struct nvif_outp *);
+int nvif_outp_bl_set(struct nvif_outp *, int level);
+
+int nvif_outp_lvds(struct nvif_outp *, bool dual, bool bpc8);
+
+int nvif_outp_hdmi(struct nvif_outp *, int head, bool enable, u8 max_ac_packet, u8 rekey, u32 khz,
+ bool scdc, bool scdc_scrambling, bool scdc_low_rates);
+
int nvif_outp_infoframe(struct nvif_outp *, u8 type, struct nvif_outp_infoframe_v0 *, u32 size);
int nvif_outp_hda_eld(struct nvif_outp *, int head, void *data, u32 size);
+
int nvif_outp_dp_aux_pwr(struct nvif_outp *, bool enable);
-int nvif_outp_dp_retrain(struct nvif_outp *);
+int nvif_outp_dp_aux_xfer(struct nvif_outp *, u8 type, u8 *size, u32 addr, u8 *data);
+
+struct nvif_outp_dp_rate {
+ int dpcd; /* -1 for non-indexed rates */
+ u32 rate;
+};
+
+int nvif_outp_dp_rates(struct nvif_outp *, struct nvif_outp_dp_rate *rate, int rate_nr);
+int nvif_outp_dp_train(struct nvif_outp *, u8 dpcd[DP_RECEIVER_CAP_SIZE],
+ u8 lttprs, u8 link_nr, u32 link_bw, bool mst, bool post_lt_adj,
+ bool retrain);
+int nvif_outp_dp_drive(struct nvif_outp *, u8 link_nr, u8 pe[4], u8 vs[4]);
+int nvif_outp_dp_sst(struct nvif_outp *, int head, u32 watermark, u32 hblanksym, u32 vblanksym);
+int nvif_outp_dp_mst_id_get(struct nvif_outp *, u32 *id);
+int nvif_outp_dp_mst_id_put(struct nvif_outp *, u32 id);
int nvif_outp_dp_mst_vcpi(struct nvif_outp *, int head,
u8 start_slot, u8 num_slots, u16 pbn, u16 aligned_pbn);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
index d3b6a68ddda3..fc0f38981391 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/memory.h
@@ -12,6 +12,7 @@ struct nvkm_tags {
};
enum nvkm_memory_target {
+ NVKM_MEM_TARGET_INST_SR_LOST, /* instance memory - not preserved across suspend */
NVKM_MEM_TARGET_INST, /* instance memory */
NVKM_MEM_TARGET_VRAM, /* video memory */
NVKM_MEM_TARGET_HOST, /* coherent system memory */
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
index fcdaefc99fe8..92a36ddfc29f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/instmem.h
@@ -26,7 +26,7 @@ struct nvkm_instmem {
u32 nvkm_instmem_rd32(struct nvkm_instmem *, u32 addr);
void nvkm_instmem_wr32(struct nvkm_instmem *, u32 addr, u32 data);
-int nvkm_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero,
+int nvkm_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero, bool preserve,
struct nvkm_memory **);
int nvkm_instobj_wrap(struct nvkm_device *, struct nvkm_memory *, struct nvkm_memory **);
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 91b5ecc57538..d47442125fa1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -109,42 +109,6 @@ nv40_backlight_init(struct nouveau_encoder *encoder,
return 0;
}
-static int
-nv50_get_intensity(struct backlight_device *bd)
-{
- struct nouveau_encoder *nv_encoder = bl_get_data(bd);
- struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nvif_object *device = &drm->client.device.object;
- int or = ffs(nv_encoder->dcb->or) - 1;
- u32 div = 1025;
- u32 val;
-
- val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
- val &= NV50_PDISP_SOR_PWM_CTL_VAL;
- return ((val * 100) + (div / 2)) / div;
-}
-
-static int
-nv50_set_intensity(struct backlight_device *bd)
-{
- struct nouveau_encoder *nv_encoder = bl_get_data(bd);
- struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nvif_object *device = &drm->client.device.object;
- int or = ffs(nv_encoder->dcb->or) - 1;
- u32 div = 1025;
- u32 val = (bd->props.brightness * div) / 100;
-
- nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
- NV50_PDISP_SOR_PWM_CTL_NEW | val);
- return 0;
-}
-
-static const struct backlight_ops nv50_bl_ops = {
- .options = BL_CORE_SUSPENDRESUME,
- .get_brightness = nv50_get_intensity,
- .update_status = nv50_set_intensity,
-};
-
/*
* eDP brightness callbacks need to happen under lock, since we need to
* enable/disable the backlight ourselves for modesets
@@ -238,53 +202,25 @@ static const struct backlight_ops nv50_edp_bl_ops = {
};
static int
-nva3_get_intensity(struct backlight_device *bd)
+nv50_get_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
- struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nvif_object *device = &drm->client.device.object;
- int or = ffs(nv_encoder->dcb->or) - 1;
- u32 div, val;
- div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
- val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
- val &= NVA3_PDISP_SOR_PWM_CTL_VAL;
- if (div && div >= val)
- return ((val * 100) + (div / 2)) / div;
-
- return 100;
+ return nvif_outp_bl_get(&nv_encoder->outp);
}
static int
-nva3_set_intensity(struct backlight_device *bd)
+nv50_set_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
- struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nvif_object *device = &drm->client.device.object;
- int or = ffs(nv_encoder->dcb->or) - 1;
- u32 div, val;
-
- div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
- val = backlight_get_brightness(bd);
- if (val)
- val = (val * div) / 100;
-
- if (div) {
- nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
- val |
- NV50_PDISP_SOR_PWM_CTL_NEW |
- NVA3_PDISP_SOR_PWM_CTL_UNK);
- return 0;
- }
-
- return -EINVAL;
+ return nvif_outp_bl_set(&nv_encoder->outp, backlight_get_brightness(bd));
}
-static const struct backlight_ops nva3_bl_ops = {
+static const struct backlight_ops nv50_bl_ops = {
.options = BL_CORE_SUSPENDRESUME,
- .get_brightness = nva3_get_intensity,
- .update_status = nva3_set_intensity,
+ .get_brightness = nv50_get_intensity,
+ .update_status = nv50_set_intensity,
};
/* FIXME: perform backlight probing for eDP _before_ this, this only gets called after connector
@@ -298,13 +234,12 @@ nv50_backlight_init(struct nouveau_backlight *bl,
const struct backlight_ops **ops)
{
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
- struct nvif_object *device = &drm->client.device.object;
/*
* Note when this runs the connectors have not been probed yet,
* so nv_conn->base.status is not set yet.
*/
- if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)) ||
+ if (nvif_outp_bl_get(&nv_encoder->outp) < 0 ||
drm_helper_probe_detect(&nv_conn->base, NULL, false) != connector_status_connected)
return -ENODEV;
@@ -346,15 +281,8 @@ nv50_backlight_init(struct nouveau_backlight *bl,
}
}
- if (drm->client.device.info.chipset <= 0xa0 ||
- drm->client.device.info.chipset == 0xaa ||
- drm->client.device.info.chipset == 0xac)
- *ops = &nv50_bl_ops;
- else
- *ops = &nva3_bl_ops;
-
+ *ops = &nv50_bl_ops;
props->max_brightness = 100;
-
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 189903b65edc..9e878cdc8e38 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -2093,9 +2093,11 @@ nouveau_bios_init(struct drm_device *dev)
if (!NVInitVBIOS(dev))
return -ENODEV;
- ret = parse_dcb_table(dev, bios);
- if (ret)
- return ret;
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+ ret = parse_dcb_table(dev, bios);
+ if (ret)
+ return ret;
+ }
if (!bios->major_version) /* we don't run version 0 bios */
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 79ea30aac31f..856b3ef5edb8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -400,10 +400,8 @@ nouveau_connector_destroy(struct drm_connector *connector)
kfree(nv_connector->edid);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- if (nv_connector->aux.transfer) {
+ if (nv_connector->aux.transfer)
drm_dp_cec_unregister_connector(&nv_connector->aux);
- kfree(nv_connector->aux.name);
- }
nvif_conn_dtor(&nv_connector->conn);
kfree(connector);
}
@@ -413,6 +411,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct nouveau_connector *conn = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
struct drm_encoder *encoder;
int ret;
@@ -421,33 +420,48 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
drm_connector_for_each_possible_encoder(connector, encoder) {
nv_encoder = nouveau_encoder(encoder);
- switch (nv_encoder->dcb->type) {
- case DCB_OUTPUT_DP:
- ret = nouveau_dp_detect(nouveau_connector(connector),
- nv_encoder);
- if (ret == NOUVEAU_DP_MST)
- return NULL;
- else if (ret == NOUVEAU_DP_SST)
- found = nv_encoder;
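+ /* Prefer NVKM-side detection when an output object exists; only fall
+  * through to the raw DDC probe below if detection reports UNKNOWN (or
+  * the encoder has no outp object at all).
+  */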
+ if (nvif_object_constructed(&nv_encoder->outp.object)) {
+ enum nvif_outp_detect_status status;
+
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
+ ret = nouveau_dp_detect(conn, nv_encoder);
+ if (ret == NOUVEAU_DP_MST)
+ return NULL;
+ if (ret != NOUVEAU_DP_SST)
+ continue;
+
+ return nv_encoder;
+ } else {
+ status = nvif_outp_detect(&nv_encoder->outp);
+ switch (status) {
+ case PRESENT:
+ return nv_encoder;
+ case NOT_PRESENT:
+ continue;
+ case UNKNOWN:
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ }
+ }
- break;
- case DCB_OUTPUT_LVDS:
+ if (!nv_encoder->i2c)
+ continue;
+
+ if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) {
switcheroo_ddc = !!(vga_switcheroo_handler_flags() &
VGA_SWITCHEROO_CAN_SWITCH_DDC);
- fallthrough;
- default:
- if (!nv_encoder->i2c)
- break;
+ }
- if (switcheroo_ddc)
- vga_switcheroo_lock_ddc(pdev);
- if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
- found = nv_encoder;
- if (switcheroo_ddc)
- vga_switcheroo_unlock_ddc(pdev);
+ if (switcheroo_ddc)
+ vga_switcheroo_lock_ddc(pdev);
+ if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
+ found = nv_encoder;
+ if (switcheroo_ddc)
+ vga_switcheroo_unlock_ddc(pdev);
- break;
- }
if (found)
break;
}
@@ -554,7 +568,6 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = NULL;
struct nouveau_encoder *nv_partner;
- struct i2c_adapter *i2c;
int type;
int ret;
enum drm_connector_status conn_status = connector_status_disconnected;
@@ -577,15 +590,20 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
}
nv_encoder = nouveau_connector_ddc_detect(connector);
- if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
- struct edid *new_edid;
+ if (nv_encoder) {
+ struct edid *new_edid = NULL;
- if ((vga_switcheroo_handler_flags() &
- VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
- nv_connector->type == DCB_CONNECTOR_LVDS)
- new_edid = drm_get_edid_switcheroo(connector, i2c);
- else
- new_edid = drm_get_edid(connector, i2c);
+ if (nv_encoder->i2c) {
+ if ((vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
+ nv_connector->type == DCB_CONNECTOR_LVDS)
+ new_edid = drm_get_edid_switcheroo(connector, nv_encoder->i2c);
+ else
+ new_edid = drm_get_edid(connector, nv_encoder->i2c);
+ } else {
+ ret = nvif_outp_edid_get(&nv_encoder->outp, (u8 **)&new_edid);
+ if (ret < 0)
+ return connector_status_disconnected;
+ }
nouveau_connector_set_edid(nv_connector, new_edid);
if (!nv_connector->edid) {
@@ -1117,7 +1135,7 @@ nouveau_connector_atomic_check(struct drm_connector *connector, struct drm_atomi
struct drm_connector_state *conn_state =
drm_atomic_get_new_connector_state(state, connector);
- if (!nv_conn->dp_encoder || !nv50_has_mst(nouveau_drm(connector->dev)))
+ if (!nv_conn->dp_encoder || !nv_conn->dp_encoder->dp.mstm)
return 0;
return drm_dp_mst_root_conn_atomic_check(conn_state, &nv_conn->dp_encoder->dp.mstm->mgr);
@@ -1206,23 +1224,17 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
struct nouveau_connector *nv_connector =
container_of(obj, typeof(*nv_connector), aux);
struct nouveau_encoder *nv_encoder;
- struct nvkm_i2c_aux *aux;
u8 size = msg->size;
int ret;
nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
- if (!nv_encoder || !(aux = nv_encoder->aux))
+ if (!nv_encoder)
return -ENODEV;
if (WARN_ON(msg->size > 16))
return -E2BIG;
- ret = nvkm_i2c_aux_acquire(aux);
- if (ret)
- return ret;
-
- ret = nvkm_i2c_aux_xfer(aux, false, msg->request, msg->address,
- msg->buffer, &size);
- nvkm_i2c_aux_release(aux);
+ ret = nvif_outp_dp_aux_xfer(&nv_encoder->outp,
+ msg->request, &size, msg->address, msg->buffer);
if (ret >= 0) {
msg->reply = ret;
return size;
@@ -1263,17 +1275,13 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
}
struct drm_connector *
-nouveau_connector_create(struct drm_device *dev,
- const struct dcb_output *dcbe)
+nouveau_connector_create(struct drm_device *dev, int index)
{
- const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_connector *nv_connector = NULL;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- char aux_name[48] = {0};
- int index = dcbe->connector;
int type, ret = 0;
bool dummy;
@@ -1295,74 +1303,86 @@ nouveau_connector_create(struct drm_device *dev,
nv_connector->index = index;
INIT_WORK(&nv_connector->irq_work, nouveau_dp_irq);
- /* attempt to parse vbios connector type and hotplug gpio */
- nv_connector->dcb = olddcb_conn(dev, index);
- if (nv_connector->dcb) {
- u32 entry = ROM16(nv_connector->dcb[0]);
- if (olddcb_conntab(dev)[3] >= 4)
- entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
-
- nv_connector->type = nv_connector->dcb[0];
- if (drm_conntype_from_dcb(nv_connector->type) ==
- DRM_MODE_CONNECTOR_Unknown) {
- NV_WARN(drm, "unknown connector type %02x\n",
- nv_connector->type);
- nv_connector->type = DCB_CONNECTOR_NONE;
+ if (disp->disp.conn_mask & BIT(nv_connector->index)) {
+ ret = nvif_conn_ctor(&disp->disp, nv_connector->base.name, nv_connector->index,
+ &nv_connector->conn);
+ if (ret) {
+ kfree(nv_connector);
+ return ERR_PTR(ret);
}
- /* Gigabyte NX85T */
- if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
- if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
- nv_connector->type = DCB_CONNECTOR_DVI_I;
+ switch (nv_connector->conn.info.type) {
+ case NVIF_CONN_VGA : type = DCB_CONNECTOR_VGA; break;
+ case NVIF_CONN_DVI_I : type = DCB_CONNECTOR_DVI_I; break;
+ case NVIF_CONN_DVI_D : type = DCB_CONNECTOR_DVI_D; break;
+ case NVIF_CONN_LVDS : type = DCB_CONNECTOR_LVDS; break;
+ case NVIF_CONN_LVDS_SPWG: type = DCB_CONNECTOR_LVDS_SPWG; break;
+ case NVIF_CONN_DP : type = DCB_CONNECTOR_DP; break;
+ case NVIF_CONN_EDP : type = DCB_CONNECTOR_eDP; break;
+ case NVIF_CONN_HDMI : type = DCB_CONNECTOR_HDMI_0; break;
+ default:
+ WARN_ON(1);
+ return NULL;
}
- /* Gigabyte GV-NX86T512H */
- if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
- if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
- nv_connector->type = DCB_CONNECTOR_DVI_I;
- }
+ nv_connector->type = type;
} else {
- nv_connector->type = DCB_CONNECTOR_NONE;
- }
+ u8 *dcb = olddcb_conn(dev, nv_connector->index);
- /* no vbios data, or an unknown dcb connector type - attempt to
- * figure out something suitable ourselves
- */
- if (nv_connector->type == DCB_CONNECTOR_NONE) {
- struct nouveau_drm *drm = nouveau_drm(dev);
- struct dcb_table *dcbt = &drm->vbios.dcb;
- u32 encoders = 0;
- int i;
-
- for (i = 0; i < dcbt->entries; i++) {
- if (dcbt->entry[i].connector == nv_connector->index)
- encoders |= (1 << dcbt->entry[i].type);
+ if (dcb)
+ nv_connector->type = dcb[0];
+ else
+ nv_connector->type = DCB_CONNECTOR_NONE;
+
+ /* attempt to parse vbios connector type and hotplug gpio */
+ if (nv_connector->type != DCB_CONNECTOR_NONE) {
+ if (drm_conntype_from_dcb(nv_connector->type) ==
+ DRM_MODE_CONNECTOR_Unknown) {
+ NV_WARN(drm, "unknown connector type %02x\n",
+ nv_connector->type);
+ nv_connector->type = DCB_CONNECTOR_NONE;
+ }
}
- if (encoders & (1 << DCB_OUTPUT_DP)) {
- if (encoders & (1 << DCB_OUTPUT_TMDS))
- nv_connector->type = DCB_CONNECTOR_DP;
- else
- nv_connector->type = DCB_CONNECTOR_eDP;
- } else
- if (encoders & (1 << DCB_OUTPUT_TMDS)) {
- if (encoders & (1 << DCB_OUTPUT_ANALOG))
- nv_connector->type = DCB_CONNECTOR_DVI_I;
- else
- nv_connector->type = DCB_CONNECTOR_DVI_D;
- } else
- if (encoders & (1 << DCB_OUTPUT_ANALOG)) {
- nv_connector->type = DCB_CONNECTOR_VGA;
- } else
- if (encoders & (1 << DCB_OUTPUT_LVDS)) {
- nv_connector->type = DCB_CONNECTOR_LVDS;
- } else
- if (encoders & (1 << DCB_OUTPUT_TV)) {
- nv_connector->type = DCB_CONNECTOR_TV_0;
+ /* no vbios data, or an unknown dcb connector type - attempt to
+ * figure out something suitable ourselves
+ */
+ if (nv_connector->type == DCB_CONNECTOR_NONE &&
+ !WARN_ON(drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)) {
+ struct dcb_table *dcbt = &drm->vbios.dcb;
+ u32 encoders = 0;
+ int i;
+
+ for (i = 0; i < dcbt->entries; i++) {
+ if (dcbt->entry[i].connector == nv_connector->index)
+ encoders |= (1 << dcbt->entry[i].type);
+ }
+
+ if (encoders & (1 << DCB_OUTPUT_TMDS)) {
+ if (encoders & (1 << DCB_OUTPUT_ANALOG))
+ nv_connector->type = DCB_CONNECTOR_DVI_I;
+ else
+ nv_connector->type = DCB_CONNECTOR_DVI_D;
+ } else
+ if (encoders & (1 << DCB_OUTPUT_ANALOG)) {
+ nv_connector->type = DCB_CONNECTOR_VGA;
+ } else
+ if (encoders & (1 << DCB_OUTPUT_LVDS)) {
+ nv_connector->type = DCB_CONNECTOR_LVDS;
+ } else
+ if (encoders & (1 << DCB_OUTPUT_TV)) {
+ nv_connector->type = DCB_CONNECTOR_TV_0;
+ }
}
}
- switch ((type = drm_conntype_from_dcb(nv_connector->type))) {
+ type = drm_conntype_from_dcb(nv_connector->type);
+ if (type == DRM_MODE_CONNECTOR_LVDS)
+ drm_connector_init(dev, connector, &nouveau_connector_funcs_lvds, type);
+ else
+ drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
+
+ switch (type) {
case DRM_MODE_CONNECTOR_LVDS:
ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
if (ret) {
@@ -1371,24 +1391,16 @@ nouveau_connector_create(struct drm_device *dev,
return ERR_PTR(ret);
}
- funcs = &nouveau_connector_funcs_lvds;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
nv_connector->aux.dev = connector->kdev;
nv_connector->aux.drm_dev = dev;
nv_connector->aux.transfer = nouveau_connector_aux_xfer;
- snprintf(aux_name, sizeof(aux_name), "sor-%04x-%04x",
- dcbe->hasht, dcbe->hashm);
- nv_connector->aux.name = kstrdup(aux_name, GFP_KERNEL);
- if (!nv_connector->aux.name) {
- kfree(nv_connector);
- return ERR_PTR(-ENOMEM);
- }
+ nv_connector->aux.name = connector->name;
drm_dp_aux_init(&nv_connector->aux);
break;
default:
- funcs = &nouveau_connector_funcs;
break;
}
@@ -1403,17 +1415,10 @@ nouveau_connector_create(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_connector_init(dev, connector, funcs, type);
drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- if (nv_connector->dcb && (disp->disp.conn_mask & BIT(nv_connector->index))) {
- ret = nvif_conn_ctor(&disp->disp, nv_connector->base.name, nv_connector->index,
- &nv_connector->conn);
- if (ret) {
- goto drm_conn_err;
- }
-
+ if (nvif_object_constructed(&nv_connector->conn.object)) {
ret = nvif_conn_event_ctor(&nv_connector->conn, "kmsHotplug",
nouveau_connector_hotplug,
NVIF_CONN_EVENT_V0_PLUG | NVIF_CONN_EVENT_V0_UNPLUG,
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 35bcb541722b..a2df4918340c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -121,7 +121,6 @@ struct nouveau_connector {
struct drm_connector base;
enum dcb_connector_type type;
u8 index;
- u8 *dcb;
struct nvif_conn conn;
u64 hpd_pending;
@@ -200,7 +199,7 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
}
struct drm_connector *
-nouveau_connector_create(struct drm_device *, const struct dcb_output *);
+nouveau_connector_create(struct drm_device *, int id);
void nouveau_connector_hpd(struct nouveau_connector *, u64 bits);
extern int nouveau_tv_disable;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 053f703f2f68..e83db051e851 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -231,7 +231,7 @@ nouveau_debugfs_gpuva(struct seq_file *m, void *data)
continue;
nouveau_uvmm_lock(uvmm);
- drm_debugfs_gpuva_info(m, &uvmm->umgr);
+ drm_debugfs_gpuva_info(m, &uvmm->base);
seq_puts(m, "\n");
nouveau_debugfs_gpuva_regions(m, uvmm);
nouveau_uvmm_unlock(uvmm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 99977e5fe716..d8c92521226d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -724,10 +724,10 @@ nouveau_display_create(struct drm_device *dev)
drm_kms_helper_poll_init(dev);
drm_kms_helper_poll_disable(dev);
- if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
- ret = nvif_disp_ctor(&drm->client.device, "kmsDisp", 0,
- &disp->disp);
- if (ret == 0) {
+ if (nouveau_modeset != 2) {
+ ret = nvif_disp_ctor(&drm->client.device, "kmsDisp", 0, &disp->disp);
+
+ if (!ret && (disp->disp.outp_mask || drm->vbios.dcb.entries)) {
nouveau_display_create_properties(dev);
if (disp->disp.object.oclass < NV50_DISP) {
dev->mode_config.fb_modifiers_not_supported = true;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 6a4980b2d4d4..7de7707ec6a8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -42,6 +42,21 @@ nouveau_dp_has_sink_count(struct drm_connector *connector,
return drm_dp_read_sink_count_cap(connector, outp->dp.dpcd, &outp->dp.desc);
}
+static bool
+nouveau_dp_probe_lttpr(struct nouveau_encoder *outp)
+{
+ u8 rev, size = sizeof(rev);
+ int ret;
+
+ ret = nvif_outp_dp_aux_xfer(&outp->outp, DP_AUX_NATIVE_READ, &size,
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
+ &rev);
+ if (ret || size < sizeof(rev) || rev < 0x14)
+ return false;
+
+ return true;
+}
+
static enum drm_connector_status
nouveau_dp_probe_dpcd(struct nouveau_connector *nv_connector,
struct nouveau_encoder *outp)
@@ -53,10 +68,112 @@ nouveau_dp_probe_dpcd(struct nouveau_connector *nv_connector,
int ret;
u8 *dpcd = outp->dp.dpcd;
+ outp->dp.lttpr.nr = 0;
+ outp->dp.rate_nr = 0;
+ outp->dp.link_nr = 0;
+ outp->dp.link_bw = 0;
+
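+ /* Probe for LTTPRs between source and sink; if any are present, try to
+  * switch them into non-transparent mode so they take part in link
+  * training, reverting to transparent mode if that write fails.
+  */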
+ if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
+ nouveau_dp_probe_lttpr(outp) &&
+ !drm_dp_read_dpcd_caps(aux, dpcd) &&
+ !drm_dp_read_lttpr_common_caps(aux, dpcd, outp->dp.lttpr.caps)) {
+ int nr = drm_dp_lttpr_count(outp->dp.lttpr.caps);
+
+ if (nr) {
+ drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
+ DP_PHY_REPEATER_MODE_TRANSPARENT);
+
+ if (nr > 0) {
+ ret = drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
+ DP_PHY_REPEATER_MODE_NON_TRANSPARENT);
+ if (ret != 1) {
+ drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
+ DP_PHY_REPEATER_MODE_TRANSPARENT);
+ } else {
+ outp->dp.lttpr.nr = nr;
+ }
+ }
+ }
+ }
+
ret = drm_dp_read_dpcd_caps(aux, dpcd);
if (ret < 0)
goto out;
+ outp->dp.link_nr = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+ if (outp->dcb->dpconf.link_nr < outp->dp.link_nr)
+ outp->dp.link_nr = outp->dcb->dpconf.link_nr;
+
+ if (outp->dp.lttpr.nr) {
+ int links = drm_dp_lttpr_max_lane_count(outp->dp.lttpr.caps);
+
+ if (links && links < outp->dp.link_nr)
+ outp->dp.link_nr = links;
+ }
+
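+ /* DPCD 1.3+ eDP sinks publish an explicit table of supported link rates
+  * (DP_SUPPORTED_LINK_RATES, in 200kHz units); convert each entry to the
+  * driver's 10kHz units and insert it into rate[] sorted highest-first.
+  */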
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && dpcd[DP_DPCD_REV] >= 0x13) {
+ __le16 rates[DP_MAX_SUPPORTED_RATES];
+
+ ret = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES, rates, sizeof(rates));
+ if (ret == sizeof(rates)) {
+ for (int i = 0; i < ARRAY_SIZE(rates); i++) {
+ u32 rate = (le16_to_cpu(rates[i]) * 200) / 10;
+ int j;
+
+ if (!rate)
+ break;
+
+ for (j = 0; j < outp->dp.rate_nr; j++) {
+ if (rate > outp->dp.rate[j].rate) {
+ for (int k = outp->dp.rate_nr; k > j; k--)
+ outp->dp.rate[k] = outp->dp.rate[k - 1];
+ break;
+ }
+ }
+
+ outp->dp.rate[j].dpcd = i;
+ outp->dp.rate[j].rate = rate;
+ outp->dp.rate_nr++;
+ }
+ }
+ }
+
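+ /* No rate table from the sink: fall back to the standard DP link rates,
+  * capped by the sink's DP_MAX_LINK_RATE, any LTTPR limit, and the
+  * VBIOS dpconf limit.
+  */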
+ if (!outp->dp.rate_nr) {
+ const u32 rates[] = { 810000, 540000, 270000, 162000 };
+ u32 max_rate = dpcd[DP_MAX_LINK_RATE] * 27000;
+
+ if (outp->dp.lttpr.nr) {
+ int rate = drm_dp_lttpr_max_link_rate(outp->dp.lttpr.caps);
+
+ if (rate && rate < max_rate)
+ max_rate = rate;
+ }
+
+ max_rate = min_t(int, max_rate, outp->dcb->dpconf.link_bw);
+
+ for (int i = 0; i < ARRAY_SIZE(rates); i++) {
+ if (rates[i] <= max_rate) {
+ outp->dp.rate[outp->dp.rate_nr].dpcd = -1;
+ outp->dp.rate[outp->dp.rate_nr].rate = rates[i];
+ outp->dp.rate_nr++;
+ }
+ }
+
+ if (WARN_ON(!outp->dp.rate_nr))
+ goto out;
+ }
+
+ ret = nvif_outp_dp_rates(&outp->outp, outp->dp.rate, outp->dp.rate_nr);
+ if (ret)
+ goto out;
+
+ for (int i = 0; i < outp->dp.rate_nr; i++) {
+ u32 link_bw = outp->dp.rate[i].rate;
+
+ if (link_bw > outp->dp.link_bw)
+ outp->dp.link_bw = link_bw;
+ }
+
ret = drm_dp_read_desc(aux, &outp->dp.desc, drm_dp_is_branch(dpcd));
if (ret < 0)
goto out;
@@ -132,14 +249,8 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
}
}
- /* Check status of HPD pin before attempting an AUX transaction that
- * would result in a number of (futile) retries on a connector which
- * has no display plugged.
- *
- * TODO: look into checking this before probing I2C to detect DVI/HDMI
- */
- hpd = nvif_conn_hpd_status(&nv_connector->conn);
- if (hpd == NVIF_CONN_HPD_STATUS_NOT_PRESENT) {
+ hpd = nvif_outp_detect(&nv_encoder->outp);
+ if (hpd == NOT_PRESENT) {
nvif_outp_dp_aux_pwr(&nv_encoder->outp, false);
goto out;
}
@@ -157,39 +268,14 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
goto out;
}
- nv_encoder->dp.link_bw = 27000 * dpcd[DP_MAX_LINK_RATE];
- nv_encoder->dp.link_nr =
- dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && dpcd[DP_DPCD_REV] >= 0x13) {
- struct drm_dp_aux *aux = &nv_connector->aux;
- int ret, i;
- u8 sink_rates[16];
-
- ret = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES, sink_rates, sizeof(sink_rates));
- if (ret == sizeof(sink_rates)) {
- for (i = 0; i < ARRAY_SIZE(sink_rates); i += 2) {
- int val = ((sink_rates[i + 1] << 8) | sink_rates[i]) * 200 / 10;
- if (val && (i == 0 || val > nv_encoder->dp.link_bw))
- nv_encoder->dp.link_bw = val;
- }
- }
- }
-
- NV_DEBUG(drm, "display: %dx%d dpcd 0x%02x\n",
- nv_encoder->dp.link_nr, nv_encoder->dp.link_bw,
- dpcd[DP_DPCD_REV]);
- NV_DEBUG(drm, "encoder: %dx%d\n",
- nv_encoder->dcb->dpconf.link_nr,
- nv_encoder->dcb->dpconf.link_bw);
-
- if (nv_encoder->dcb->dpconf.link_nr < nv_encoder->dp.link_nr)
- nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
- if (nv_encoder->dcb->dpconf.link_bw < nv_encoder->dp.link_bw)
- nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw;
+ NV_DEBUG(drm, "sink dpcd version: 0x%02x\n", dpcd[DP_DPCD_REV]);
+ for (int i = 0; i < nv_encoder->dp.rate_nr; i++)
+ NV_DEBUG(drm, "sink rate %d: %d\n", i, nv_encoder->dp.rate[i].rate);
- NV_DEBUG(drm, "maximum: %dx%d\n",
- nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
+ NV_DEBUG(drm, "encoder: %dx%d\n", nv_encoder->dcb->dpconf.link_nr,
+ nv_encoder->dcb->dpconf.link_bw);
+ NV_DEBUG(drm, "maximum: %dx%d\n", nv_encoder->dp.link_nr,
+ nv_encoder->dp.link_bw);
if (mstm && mstm->can_mst) {
ret = nv50_mstm_detect(nv_encoder);
@@ -211,15 +297,186 @@ out:
return ret;
}
+void
+nouveau_dp_power_down(struct nouveau_encoder *outp)
+{
+ struct drm_dp_aux *aux = &outp->conn->aux;
+ int ret;
+ u8 pwr;
+
+ mutex_lock(&outp->dp.hpd_irq_lock);
+
+ ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
+ if (ret == 1) {
+ pwr &= ~DP_SET_POWER_MASK;
+ pwr |= DP_SET_POWER_D3;
+ drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
+ }
+
+ outp->dp.lt.nr = 0;
+ mutex_unlock(&outp->dp.hpd_irq_lock);
+}
+
+static bool
+nouveau_dp_train_link(struct nouveau_encoder *outp, bool retrain)
+{
+ struct drm_dp_aux *aux = &outp->conn->aux;
+ bool post_lt = false;
+ int ret, retries = 0;
+
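+ /* Bit 5 of DP_MAX_LANE_COUNT is POST_LT_ADJ_REQ_SUPPORTED: such sinks
+  * (when TPS4 is not supported) ask the source to keep adjusting drive
+  * settings for a while after link training completes.
+  */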
+ if ( (outp->dp.dpcd[DP_MAX_LANE_COUNT] & 0x20) &&
+ !(outp->dp.dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED))
+ post_lt = true;
+
+retry:
+ ret = nvif_outp_dp_train(&outp->outp, outp->dp.dpcd,
+ outp->dp.lttpr.nr,
+ outp->dp.lt.nr,
+ outp->dp.lt.bw,
+ outp->dp.lt.mst,
+ post_lt,
+ retrain);
+ if (ret)
+ return false;
+
+ if (post_lt) {
+ u8 stat[DP_LINK_STATUS_SIZE];
+ u8 prev[2];
+ u8 time = 0, adjusts = 0, tmp;
+
+ ret = drm_dp_dpcd_read_phy_link_status(aux, DP_PHY_DPRX, stat);
+ if (ret)
+ return false;
+
+ for (;;) {
+ if (!drm_dp_channel_eq_ok(stat, outp->dp.lt.nr)) {
+ ret = 1;
+ break;
+ }
+
+ if (!(stat[2] & 0x02))
+ break;
+
+ msleep(5);
+ time += 5;
+
+ memcpy(prev, &stat[4], sizeof(prev));
+ ret = drm_dp_dpcd_read_phy_link_status(aux, DP_PHY_DPRX, stat);
+ if (ret)
+ break;
+
+ if (!memcmp(prev, &stat[4], sizeof(prev))) {
+ if (time > 200)
+ break;
+ } else {
+ u8 pe[4], vs[4];
+
+ if (adjusts++ == 6)
+ break;
+
+ for (int i = 0; i < outp->dp.lt.nr; i++) {
+ pe[i] = drm_dp_get_adjust_request_pre_emphasis(stat, i) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+ vs[i] = drm_dp_get_adjust_request_voltage(stat, i) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ }
+
+ ret = nvif_outp_dp_drive(&outp->outp, outp->dp.lt.nr, pe, vs);
+ if (ret)
+ break;
+
+ time = 0;
+ }
+ }
+
+ if (drm_dp_dpcd_readb(aux, DP_LANE_COUNT_SET, &tmp) == 1) {
+ tmp &= ~0x20;
+ drm_dp_dpcd_writeb(aux, DP_LANE_COUNT_SET, tmp);
+ }
+ }
+
+ if (ret == 1 && retries++ < 3)
+ goto retry;
+
+ return ret == 0;
+}
+
bool
-nouveau_dp_link_check(struct nouveau_connector *nv_connector)
+nouveau_dp_train(struct nouveau_encoder *outp, bool mst, u32 khz, u8 bpc)
+{
+ struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
+ struct drm_dp_aux *aux = &outp->conn->aux;
+ u32 min_rate;
+ u8 pwr;
+ bool ret = true;
+
+ if (mst)
+ min_rate = outp->dp.link_nr * outp->dp.rate[0].rate;
+ else
+ min_rate = DIV_ROUND_UP(khz * bpc * 3, 8);
+
+ NV_DEBUG(drm, "%s link training (mst:%d min_rate:%d)\n",
+ outp->base.base.name, mst, min_rate);
+
+ mutex_lock(&outp->dp.hpd_irq_lock);
+
+ if (drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr) == 1) {
+ if ((pwr & DP_SET_POWER_MASK) != DP_SET_POWER_D0) {
+ pwr &= ~DP_SET_POWER_MASK;
+ pwr |= DP_SET_POWER_D0;
+ drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
+ }
+ }
+
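+ /* Walk lane counts from widest to narrowest and rates from fastest to
+  * slowest (rate[] is sorted descending); accept the first combination
+  * that covers min_rate and trains successfully.
+  */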
+ for (int nr = outp->dp.link_nr; nr; nr >>= 1) {
+ for (int rate = 0; rate < outp->dp.rate_nr; rate++) {
+ if (outp->dp.rate[rate].rate * nr >= min_rate) {
+ outp->dp.lt.nr = nr;
+ outp->dp.lt.bw = outp->dp.rate[rate].rate;
+ outp->dp.lt.mst = mst;
+ if (nouveau_dp_train_link(outp, false))
+ goto done;
+ }
+ }
+ }
+
+ ret = false;
+done:
+ mutex_unlock(&outp->dp.hpd_irq_lock);
+ return ret;
+}
+
+static bool
+nouveau_dp_link_check_locked(struct nouveau_encoder *outp)
{
- struct nouveau_encoder *nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
+ u8 link_status[DP_LINK_STATUS_SIZE];
- if (!nv_encoder || nv_encoder->outp.or.id < 0)
+ if (!outp || !outp->dp.lt.nr)
return true;
- return nvif_outp_dp_retrain(&nv_encoder->outp) == 0;
+ if (drm_dp_dpcd_read_phy_link_status(&outp->conn->aux, DP_PHY_DPRX, link_status) < 0)
+ return false;
+
+ if (drm_dp_channel_eq_ok(link_status, outp->dp.lt.nr))
+ return true;
+
+ return nouveau_dp_train_link(outp, true);
+}
+
+bool
+nouveau_dp_link_check(struct nouveau_connector *nv_connector)
+{
+ struct nouveau_encoder *outp = nv_connector->dp_encoder;
+ bool link_ok = true;
+
+ if (outp) {
+ mutex_lock(&outp->dp.hpd_irq_lock);
+ if (outp->dp.lt.nr)
+ link_ok = nouveau_dp_link_check_locked(outp);
+ mutex_unlock(&outp->dp.hpd_irq_lock);
+ }
+
+ return link_ok;
}
void
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 4396f501b16a..50589f982d1a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1133,7 +1133,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
}
get_task_comm(tmpname, current);
- snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
+ rcu_read_lock();
+ snprintf(name, sizeof(name), "%s[%d]",
+ tmpname, pid_nr(rcu_dereference(fpriv->pid)));
+ rcu_read_unlock();
if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
ret = -ENOMEM;
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index bcba1a14cfab..333042fc493f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -49,8 +49,9 @@ struct nouveau_encoder {
struct nvif_outp outp;
int or;
+ struct nouveau_connector *conn;
+
struct i2c_adapter *i2c;
- struct nvkm_i2c_aux *aux;
/* different to drm_encoder.crtc, this reflects what's
* actually programmed on the hw, not the proposed crtc */
@@ -60,7 +61,6 @@ struct nouveau_encoder {
/* Protected by nouveau_drm.audio.lock */
struct {
bool enabled;
- struct drm_connector *connector;
} audio;
struct drm_display_mode mode;
@@ -68,18 +68,38 @@ struct nouveau_encoder {
struct nv04_output_reg restore;
- union {
+ struct {
+ struct {
+ bool enabled;
+ } hdmi;
+
struct {
struct nv50_mstm *mstm;
+
+ struct {
+ u8 caps[DP_LTTPR_COMMON_CAP_SIZE];
+ u8 nr;
+ } lttpr;
+
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+ struct nvif_outp_dp_rate rate[8];
+ int rate_nr;
+
int link_nr;
int link_bw;
+ struct {
+ bool mst;
+ u8 nr;
+ u32 bw;
+ } lt;
+
/* Protects DP state that needs to be accessed outside
* connector reprobing contexts
*/
struct mutex hpd_irq_lock;
- u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct drm_dp_desc desc;
@@ -141,6 +161,8 @@ enum nouveau_dp_status {
};
int nouveau_dp_detect(struct nouveau_connector *, struct nouveau_encoder *);
+bool nouveau_dp_train(struct nouveau_encoder *, bool mst, u32 khz, u8 bpc);
+void nouveau_dp_power_down(struct nouveau_encoder *);
bool nouveau_dp_link_check(struct nouveau_connector *);
void nouveau_dp_irq(struct work_struct *);
enum drm_mode_status nv50_dp_mode_valid(struct nouveau_encoder *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c
index 19024ce21fbb..bf6c12f4342a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_exec.c
+++ b/drivers/gpu/drm/nouveau/nouveau_exec.c
@@ -107,8 +107,8 @@ nouveau_exec_job_submit(struct nouveau_job *job)
drm_exec_until_all_locked(exec) {
struct drm_gpuva *va;
- drm_gpuva_for_each_va(va, &uvmm->umgr) {
- if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
+ drm_gpuvm_for_each_va(va, &uvmm->base) {
+ if (unlikely(va == &uvmm->base.kernel_alloc_node))
continue;
ret = drm_exec_prepare_obj(exec, va->gem.obj, 1);
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
index aae780e4a4aa..5cf892c50f43 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c
@@ -329,7 +329,7 @@ nouveau_uvma_region_create(struct nouveau_uvmm *uvmm,
struct nouveau_uvma_region *reg;
int ret;
- if (!drm_gpuva_interval_empty(&uvmm->umgr, addr, range))
+ if (!drm_gpuvm_interval_empty(&uvmm->base, addr, range))
return -ENOSPC;
ret = nouveau_uvma_region_alloc(&reg);
@@ -384,7 +384,7 @@ nouveau_uvma_region_empty(struct nouveau_uvma_region *reg)
{
struct nouveau_uvmm *uvmm = reg->uvmm;
- return drm_gpuva_interval_empty(&uvmm->umgr,
+ return drm_gpuvm_interval_empty(&uvmm->base,
reg->va.addr,
reg->va.range);
}
@@ -444,7 +444,7 @@ op_map_prepare_unwind(struct nouveau_uvma *uvma)
static void
op_unmap_prepare_unwind(struct drm_gpuva *va)
{
- drm_gpuva_insert(va->mgr, va);
+ drm_gpuva_insert(va->vm, va);
}
static void
@@ -589,7 +589,7 @@ op_map_prepare(struct nouveau_uvmm *uvmm,
uvma->region = args->region;
uvma->kind = args->kind;
- drm_gpuva_map(&uvmm->umgr, &uvma->va, op);
+ drm_gpuva_map(&uvmm->base, &uvma->va, op);
/* Keep a reference until this uvma is destroyed. */
nouveau_uvma_gem_get(uvma);
@@ -1194,7 +1194,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
goto unwind_continue;
}
- op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
+ op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base,
op->va.addr,
op->va.range);
if (IS_ERR(op->ops)) {
@@ -1205,7 +1205,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
op->ops);
if (ret) {
- drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ drm_gpuva_ops_free(&uvmm->base, op->ops);
op->ops = NULL;
op->reg = NULL;
goto unwind_continue;
@@ -1240,7 +1240,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
}
}
- op->ops = drm_gpuva_sm_map_ops_create(&uvmm->umgr,
+ op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
op->va.addr,
op->va.range,
op->gem.obj,
@@ -1256,7 +1256,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
op->va.range,
op->flags & 0xff);
if (ret) {
- drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ drm_gpuva_ops_free(&uvmm->base, op->ops);
op->ops = NULL;
goto unwind_continue;
}
@@ -1264,7 +1264,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
break;
}
case OP_UNMAP:
- op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
+ op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base,
op->va.addr,
op->va.range);
if (IS_ERR(op->ops)) {
@@ -1275,7 +1275,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
op->ops);
if (ret) {
- drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ drm_gpuva_ops_free(&uvmm->base, op->ops);
op->ops = NULL;
goto unwind_continue;
}
@@ -1404,7 +1404,7 @@ unwind:
break;
}
- drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ drm_gpuva_ops_free(&uvmm->base, op->ops);
op->ops = NULL;
op->reg = NULL;
}
@@ -1509,7 +1509,7 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
}
if (!IS_ERR_OR_NULL(op->ops))
- drm_gpuva_ops_free(&uvmm->umgr, op->ops);
+ drm_gpuva_ops_free(&uvmm->base, op->ops);
if (obj)
drm_gem_object_put(obj);
@@ -1836,11 +1836,11 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
uvmm->kernel_managed_addr = kernel_managed_addr;
uvmm->kernel_managed_size = kernel_managed_size;
- drm_gpuva_manager_init(&uvmm->umgr, cli->name,
- NOUVEAU_VA_SPACE_START,
- NOUVEAU_VA_SPACE_END,
- kernel_managed_addr, kernel_managed_size,
- NULL);
+ drm_gpuvm_init(&uvmm->base, cli->name,
+ NOUVEAU_VA_SPACE_START,
+ NOUVEAU_VA_SPACE_END,
+ kernel_managed_addr, kernel_managed_size,
+ NULL);
ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
cli->vmm.vmm.object.oclass, RAW,
@@ -1855,7 +1855,7 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
return 0;
out_free_gpuva_mgr:
- drm_gpuva_manager_destroy(&uvmm->umgr);
+ drm_gpuvm_destroy(&uvmm->base);
out_unlock:
mutex_unlock(&cli->mutex);
return ret;
@@ -1877,11 +1877,11 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
wait_event(entity->job.wq, list_empty(&entity->job.list.head));
nouveau_uvmm_lock(uvmm);
- drm_gpuva_for_each_va_safe(va, next, &uvmm->umgr) {
+ drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) {
struct nouveau_uvma *uvma = uvma_from_va(va);
struct drm_gem_object *obj = va->gem.obj;
- if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
+ if (unlikely(va == &uvmm->base.kernel_alloc_node))
continue;
drm_gpuva_remove(va);
@@ -1910,7 +1910,7 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
mutex_lock(&cli->mutex);
nouveau_vmm_fini(&uvmm->vmm);
- drm_gpuva_manager_destroy(&uvmm->umgr);
+ drm_gpuvm_destroy(&uvmm->base);
mutex_unlock(&cli->mutex);
dma_resv_fini(&uvmm->resv);
diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.h b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
index fc7f6fd2a4e1..a308c59760a5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_uvmm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.h
@@ -3,13 +3,13 @@
#ifndef __NOUVEAU_UVMM_H__
#define __NOUVEAU_UVMM_H__
-#include <drm/drm_gpuva_mgr.h>
+#include <drm/drm_gpuvm.h>
#include "nouveau_drv.h"
struct nouveau_uvmm {
+ struct drm_gpuvm base;
struct nouveau_vmm vmm;
- struct drm_gpuva_manager umgr;
struct maple_tree region_mt;
struct mutex mutex;
struct dma_resv resv;
@@ -41,10 +41,10 @@ struct nouveau_uvma {
u8 kind;
};
-#define uvmm_from_mgr(x) container_of((x), struct nouveau_uvmm, umgr)
+#define uvmm_from_gpuvm(x) container_of((x), struct nouveau_uvmm, base)
#define uvma_from_va(x) container_of((x), struct nouveau_uvma, va)
-#define to_uvmm(x) uvmm_from_mgr((x)->va.mgr)
+#define to_uvmm(x) uvmm_from_gpuvm((x)->va.vm)
struct nouveau_uvmm_bind_job {
struct nouveau_job base;
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index a3264a0e933a..3a27245f467f 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -69,7 +69,7 @@ nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
} nop = {};
int ret;
- strncpy(args.name, name, sizeof(args.name));
+ strscpy_pad(args.name, name, sizeof(args.name));
ret = nvif_object_ctor(parent != client ? &parent->object : NULL,
name ? name : "nvifClient", 0,
NVIF_CLASS_CLIENT, &args, sizeof(args),
diff --git a/drivers/gpu/drm/nouveau/nvif/conn.c b/drivers/gpu/drm/nouveau/nvif/conn.c
index a3cf91aeae2d..9ee18cb99264 100644
--- a/drivers/gpu/drm/nouveau/nvif/conn.c
+++ b/drivers/gpu/drm/nouveau/nvif/conn.c
@@ -45,20 +45,6 @@ nvif_conn_event_ctor(struct nvif_conn *conn, const char *name, nvif_event_func f
return ret;
}
-int
-nvif_conn_hpd_status(struct nvif_conn *conn)
-{
- struct nvif_conn_hpd_status_v0 args;
- int ret;
-
- args.version = 0;
-
- ret = nvif_mthd(&conn->object, NVIF_CONN_V0_HPD_STATUS, &args, sizeof(args));
- NVIF_ERRON(ret, &conn->object, "[HPD_STATUS] support:%d present:%d",
- args.support, args.present);
- return ret ? ret : !!args.support + !!args.present;
-}
-
void
nvif_conn_dtor(struct nvif_conn *conn)
{
@@ -77,5 +63,25 @@ nvif_conn_ctor(struct nvif_disp *disp, const char *name, int id, struct nvif_con
ret = nvif_object_ctor(&disp->object, name ?: "nvifConn", id, NVIF_CLASS_CONN,
&args, sizeof(args), &conn->object);
NVIF_ERRON(ret, &disp->object, "[NEW conn id:%d]", id);
- return ret;
+ if (ret)
+ return ret;
+
+ conn->id = id;
+
+ switch (args.type) {
+ case NVIF_CONN_V0_VGA : conn->info.type = NVIF_CONN_VGA; break;
+ case NVIF_CONN_V0_TV : conn->info.type = NVIF_CONN_TV; break;
+ case NVIF_CONN_V0_DVI_I : conn->info.type = NVIF_CONN_DVI_I; break;
+ case NVIF_CONN_V0_DVI_D : conn->info.type = NVIF_CONN_DVI_D; break;
+ case NVIF_CONN_V0_LVDS : conn->info.type = NVIF_CONN_LVDS; break;
+ case NVIF_CONN_V0_LVDS_SPWG: conn->info.type = NVIF_CONN_LVDS_SPWG; break;
+ case NVIF_CONN_V0_HDMI : conn->info.type = NVIF_CONN_HDMI; break;
+ case NVIF_CONN_V0_DP : conn->info.type = NVIF_CONN_DP; break;
+ case NVIF_CONN_V0_EDP : conn->info.type = NVIF_CONN_EDP; break;
+ default:
+ break;
+ }
+
+ return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/disp.c b/drivers/gpu/drm/nouveau/nvif/disp.c
index 09915f2715af..097246e10cdb 100644
--- a/drivers/gpu/drm/nouveau/nvif/disp.c
+++ b/drivers/gpu/drm/nouveau/nvif/disp.c
@@ -60,7 +60,7 @@ nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass, struct
cid = nvif_sclass(&device->object, disps, oclass);
disp->object.client = NULL;
if (cid < 0) {
- NVIF_ERRON(cid, &device->object, "[NEW disp%04x] not supported", oclass);
+ NVIF_DEBUG(&device->object, "[NEW disp%04x] not supported", oclass);
return cid;
}
diff --git a/drivers/gpu/drm/nouveau/nvif/outp.c b/drivers/gpu/drm/nouveau/nvif/outp.c
index c24bc5eae3ec..5d3190c05250 100644
--- a/drivers/gpu/drm/nouveau/nvif/outp.c
+++ b/drivers/gpu/drm/nouveau/nvif/outp.c
@@ -47,10 +47,134 @@ nvif_outp_dp_mst_vcpi(struct nvif_outp *outp, int head,
}
int
-nvif_outp_dp_retrain(struct nvif_outp *outp)
+nvif_outp_dp_mst_id_put(struct nvif_outp *outp, u32 id)
{
- int ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_RETRAIN, NULL, 0);
- NVIF_ERRON(ret, &outp->object, "[DP_RETRAIN]");
+ struct nvif_outp_dp_mst_id_put_v0 args;
+ int ret;
+
+ args.version = 0;
+ args.id = id;
+ ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_MST_ID_PUT, &args, sizeof(args));
+ NVIF_ERRON(ret, &outp->object, "[DP_MST_ID_PUT id:%08x]", args.id);
+ return ret;
+}
+
+int
+nvif_outp_dp_mst_id_get(struct nvif_outp *outp, u32 *id)
+{
+ struct nvif_outp_dp_mst_id_get_v0 args;
+ int ret;
+
+ args.version = 0;
+ ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_MST_ID_GET, &args, sizeof(args));
+ NVIF_ERRON(ret, &outp->object, "[DP_MST_ID_GET] id:%08x", args.id);
+ if (ret)
+ return ret;
+
+ *id = args.id;
+ return 0;
+}
+
+int
+nvif_outp_dp_sst(struct nvif_outp *outp, int head, u32 watermark, u32 hblanksym, u32 vblanksym)
+{
+ struct nvif_outp_dp_sst_v0 args;
+ int ret;
+
+ args.version = 0;
+ args.head = head;
+ args.watermark = watermark;
+ args.hblanksym = hblanksym;
+ args.vblanksym = vblanksym;
+ ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_SST, &args, sizeof(args));
+ NVIF_ERRON(ret, &outp->object,
+ "[DP_SST head:%d watermark:%d hblanksym:%d vblanksym:%d]",
+ args.head, args.watermark, args.hblanksym, args.vblanksym);
+ return ret;
+}
+
+int
+nvif_outp_dp_drive(struct nvif_outp *outp, u8 link_nr, u8 pe[4], u8 vs[4])
+{
+ struct nvif_outp_dp_drive_v0 args;
+ int ret;
+
+ args.version = 0;
+ args.lanes = link_nr;
+ memcpy(args.pe, pe, sizeof(args.pe));
+ memcpy(args.vs, vs, sizeof(args.vs));
+
+ ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_DRIVE, &args, sizeof(args));
+ NVIF_ERRON(ret, &outp->object, "[DP_DRIVE lanes:%d]", args.lanes);
+ return ret;
+}
+
+int
+nvif_outp_dp_train(struct nvif_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE], u8 lttprs,
+ u8 link_nr, u32 link_bw, bool mst, bool post_lt_adj, bool retrain)
+{
+ struct nvif_outp_dp_train_v0 args;
+ int ret;
+
+ args.version = 0;
+ args.retrain = retrain;
+ args.mst = mst;
+ args.lttprs = lttprs;
+ args.post_lt_adj = post_lt_adj;
+ args.link_nr = link_nr;
+ args.link_bw = link_bw;
+ memcpy(args.dpcd, dpcd, sizeof(args.dpcd));
+
+ ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_TRAIN, &args, sizeof(args));
+ NVIF_ERRON(ret, &outp->object,
+ "[DP_TRAIN retrain:%d mst:%d lttprs:%d post_lt_adj:%d nr:%d bw:%d]",
+ args.retrain, args.mst, args.lttprs, args.post_lt_adj, args.link_nr,
+ args.link_bw);
+ return ret;
+}
+
+int
+nvif_outp_dp_rates(struct nvif_outp *outp, struct nvif_outp_dp_rate *rate, int rate_nr)
+{
+ struct nvif_outp_dp_rates_v0 args;
+ int ret;
+
+ if (rate_nr > ARRAY_SIZE(args.rate))
+ return -EINVAL;
+
+ args.version = 0;
+ args.rates = rate_nr;
+ for (int i = 0; i < args.rates; i++, rate++) {
+ args.rate[i].dpcd = rate->dpcd;
+ args.rate[i].rate = rate->rate;
+ }
+
+ ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_RATES, &args, sizeof(args));
+ NVIF_ERRON(ret, &outp->object, "[DP_RATES rates:%d]", args.rates);
+ return ret;
+}
+
+int
+nvif_outp_dp_aux_xfer(struct nvif_outp *outp, u8 type, u8 *psize, u32 addr, u8 *data)
+{
+ struct nvif_outp_dp_aux_xfer_v0 args;
+ u8 size = *psize;
+ int ret;
+
+ args.version = 0;
+ args.type = type;
+ args.size = size;
+ args.addr = addr;
+ memcpy(args.data, data, size);
+ ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_AUX_XFER, &args, sizeof(args));
+ NVIF_DEBUG(&outp->object, "[DP_AUX_XFER type:%d size:%d addr:%05x] %d size:%d (ret: %d)",
+ args.type, size, args.addr, ret, args.size, ret);
+ if (ret < 0)
+ return ret;
+
+ *psize = args.size;
+
+ memcpy(data, args.data, size);
return ret;
}
@@ -101,6 +225,74 @@ nvif_outp_infoframe(struct nvif_outp *outp, u8 type, struct nvif_outp_infoframe_
return ret;
}
+int
+nvif_outp_hdmi(struct nvif_outp *outp, int head, bool enable, u8 max_ac_packet, u8 rekey,
+ u32 khz, bool scdc, bool scdc_scrambling, bool scdc_low_rates)
+{
+ struct nvif_outp_hdmi_v0 args;
+ int ret;
+
+ args.version = 0;
+ args.head = head;
+ args.enable = enable;
+ args.max_ac_packet = max_ac_packet;
+ args.rekey = rekey;
+ args.khz = khz;
+ args.scdc = scdc;
+ args.scdc_scrambling = scdc_scrambling;
+ args.scdc_low_rates = scdc_low_rates;
+
+ ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_HDMI, &args, sizeof(args));
+ NVIF_ERRON(ret, &outp->object,
+ "[HDMI head:%d enable:%d max_ac_packet:%d rekey:%d khz:%d scdc:%d "
+ "scdc_scrambling:%d scdc_low_rates:%d]",
+ args.head, args.enable, args.max_ac_packet, args.rekey, args.khz,
+ args.scdc, args.scdc_scrambling, args.scdc_low_rates);
+ return ret;
+}
+
+int
+nvif_outp_lvds(struct nvif_outp *outp, bool dual, bool bpc8)
+{
+ struct nvif_outp_lvds_v0 args;
+ int ret;
+
+ args.version = 0;
+ args.dual = dual;
+ args.bpc8 = bpc8;
+
+ ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_LVDS, &args, sizeof(args));
+ NVIF_ERRON(ret, &outp->object, "[LVDS dual:%d 8bpc:%d]", args.dual, args.bpc8);
+ return ret;
+}
+
+int
+nvif_outp_bl_set(struct nvif_outp *outp, int level)
+{
+ struct nvif_outp_bl_set_v0 args;
+ int ret;
+
+ args.version = 0;
+ args.level = level;
+
+ ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_BL_SET, &args, sizeof(args));
+ NVIF_ERRON(ret, &outp->object, "[BL_SET level:%d]", args.level);
+ return ret;
+}
+
+int
+nvif_outp_bl_get(struct nvif_outp *outp)
+{
+ struct nvif_outp_bl_get_v0 args;
+ int ret;
+
+ args.version = 0;
+
+ ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_BL_GET, &args, sizeof(args));
+ NVIF_ERRON(ret, &outp->object, "[BL_GET level:%d]", args.level);
+ return ret ? ret : args.level;
+}
+
void
nvif_outp_release(struct nvif_outp *outp)
{
@@ -110,12 +302,12 @@ nvif_outp_release(struct nvif_outp *outp)
}
static inline int
-nvif_outp_acquire(struct nvif_outp *outp, u8 proto, struct nvif_outp_acquire_v0 *args)
+nvif_outp_acquire(struct nvif_outp *outp, u8 type, struct nvif_outp_acquire_v0 *args)
{
int ret;
args->version = 0;
- args->proto = proto;
+ args->type = type;
ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_ACQUIRE, args, sizeof(*args));
if (ret)
@@ -127,73 +319,106 @@ nvif_outp_acquire(struct nvif_outp *outp, u8 proto, struct nvif_outp_acquire_v0
}
int
-nvif_outp_acquire_dp(struct nvif_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE],
- int link_nr, int link_bw, bool hda, bool mst)
+nvif_outp_acquire_pior(struct nvif_outp *outp)
{
struct nvif_outp_acquire_v0 args;
int ret;
- args.dp.link_nr = link_nr;
- args.dp.link_bw = link_bw;
- args.dp.hda = hda;
- args.dp.mst = mst;
- memcpy(args.dp.dpcd, dpcd, sizeof(args.dp.dpcd));
-
- ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_DP, &args);
- NVIF_ERRON(ret, &outp->object,
- "[ACQUIRE proto:DP link_nr:%d link_bw:%02x hda:%d mst:%d] or:%d link:%d",
- args.dp.link_nr, args.dp.link_bw, args.dp.hda, args.dp.mst, args.or, args.link);
+ ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_PIOR, &args);
+ NVIF_ERRON(ret, &outp->object, "[ACQUIRE PIOR] or:%d", args.or);
return ret;
}
int
-nvif_outp_acquire_lvds(struct nvif_outp *outp, bool dual, bool bpc8)
+nvif_outp_acquire_sor(struct nvif_outp *outp, bool hda)
{
struct nvif_outp_acquire_v0 args;
int ret;
- args.lvds.dual = dual;
- args.lvds.bpc8 = bpc8;
+ args.sor.hda = hda;
- ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_LVDS, &args);
- NVIF_ERRON(ret, &outp->object,
- "[ACQUIRE proto:LVDS dual:%d 8bpc:%d] or:%d link:%d",
- args.lvds.dual, args.lvds.bpc8, args.or, args.link);
+ ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_SOR, &args);
+ NVIF_ERRON(ret, &outp->object, "[ACQUIRE SOR] or:%d link:%d", args.or, args.link);
return ret;
}
int
-nvif_outp_acquire_tmds(struct nvif_outp *outp, int head,
- bool hdmi, u8 max_ac_packet, u8 rekey, u8 scdc, bool hda)
+nvif_outp_acquire_dac(struct nvif_outp *outp)
{
struct nvif_outp_acquire_v0 args;
int ret;
- args.tmds.head = head;
- args.tmds.hdmi = hdmi;
- args.tmds.hdmi_max_ac_packet = max_ac_packet;
- args.tmds.hdmi_rekey = rekey;
- args.tmds.hdmi_scdc = scdc;
- args.tmds.hdmi_hda = hda;
-
- ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_TMDS, &args);
- NVIF_ERRON(ret, &outp->object,
- "[ACQUIRE proto:TMDS head:%d hdmi:%d max_ac_packet:%d rekey:%d scdc:%d hda:%d]"
- " or:%d link:%d", args.tmds.head, args.tmds.hdmi, args.tmds.hdmi_max_ac_packet,
- args.tmds.hdmi_rekey, args.tmds.hdmi_scdc, args.tmds.hdmi_hda,
- args.or, args.link);
+ ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_DAC, &args);
+ NVIF_ERRON(ret, &outp->object, "[ACQUIRE DAC] or:%d", args.or);
return ret;
}
+static int
+nvif_outp_inherit(struct nvif_outp *outp,
+ u8 proto,
+ struct nvif_outp_inherit_v0 *args,
+ u8 *proto_out)
+{
+ int ret;
+
+ args->version = 0;
+ args->proto = proto;
+
+ ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_INHERIT, args, sizeof(*args));
+ if (ret)
+ return ret;
+
+ outp->or.id = args->or;
+ outp->or.link = args->link;
+ *proto_out = args->proto;
+ return 0;
+}
+
int
-nvif_outp_acquire_rgb_crt(struct nvif_outp *outp)
+nvif_outp_inherit_lvds(struct nvif_outp *outp, u8 *proto_out)
{
- struct nvif_outp_acquire_v0 args;
+ struct nvif_outp_inherit_v0 args;
int ret;
- ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_RGB_CRT, &args);
- NVIF_ERRON(ret, &outp->object, "[ACQUIRE proto:RGB_CRT] or:%d", args.or);
- return ret;
+ ret = nvif_outp_inherit(outp, NVIF_OUTP_INHERIT_V0_LVDS, &args, proto_out);
+ NVIF_ERRON(ret && ret != -ENODEV, &outp->object, "[INHERIT proto:LVDS] ret:%d", ret);
+ return ret ?: args.head;
+}
+
+int
+nvif_outp_inherit_tmds(struct nvif_outp *outp, u8 *proto_out)
+{
+ struct nvif_outp_inherit_v0 args;
+ int ret;
+
+ ret = nvif_outp_inherit(outp, NVIF_OUTP_INHERIT_V0_TMDS, &args, proto_out);
+ NVIF_ERRON(ret && ret != -ENODEV, &outp->object, "[INHERIT proto:TMDS] ret:%d", ret);
+ return ret ?: args.head;
+}
+
+int
+nvif_outp_inherit_dp(struct nvif_outp *outp, u8 *proto_out)
+{
+ struct nvif_outp_inherit_v0 args;
+ int ret;
+
+ ret = nvif_outp_inherit(outp, NVIF_OUTP_INHERIT_V0_DP, &args, proto_out);
+ NVIF_ERRON(ret && ret != -ENODEV, &outp->object, "[INHERIT proto:DP] ret:%d", ret);
+
+ // TODO: Get current link info
+
+ return ret ?: args.head;
+}
+
+int
+nvif_outp_inherit_rgb_crt(struct nvif_outp *outp, u8 *proto_out)
+{
+ struct nvif_outp_inherit_v0 args;
+ int ret;
+
+ ret = nvif_outp_inherit(outp, NVIF_OUTP_INHERIT_V0_RGB_CRT, &args, proto_out);
+ NVIF_ERRON(ret && ret != -ENODEV, &outp->object, "[INHERIT proto:RGB_CRT] ret:%d", ret);
+ return ret ?: args.head;
}
int
@@ -210,6 +435,61 @@ nvif_outp_load_detect(struct nvif_outp *outp, u32 loadval)
return ret < 0 ? ret : args.load;
}
+int
+nvif_outp_edid_get(struct nvif_outp *outp, u8 **pedid)
+{
+ struct nvif_outp_edid_get_v0 *args;
+ int ret;
+
+ args = kmalloc(sizeof(*args), GFP_KERNEL);
+ if (!args)
+ return -ENOMEM;
+
+ args->version = 0;
+
+ ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_EDID_GET, args, sizeof(*args));
+ NVIF_ERRON(ret, &outp->object, "[EDID_GET] size:%d", args->size);
+ if (ret)
+ goto done;
+
+ *pedid = kmalloc(args->size, GFP_KERNEL);
+ if (!*pedid) {
+ ret = -ENOMEM;
+ goto done;
+ }
+
+ memcpy(*pedid, args->data, args->size);
+ ret = args->size;
+done:
+ kfree(args);
+ return ret;
+}
+
+enum nvif_outp_detect_status
+nvif_outp_detect(struct nvif_outp *outp)
+{
+ struct nvif_outp_detect_v0 args;
+ int ret;
+
+ args.version = 0;
+
+ ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_DETECT, &args, sizeof(args));
+ NVIF_ERRON(ret, &outp->object, "[DETECT] status:%02x", args.status);
+ if (ret)
+ return UNKNOWN;
+
+ switch (args.status) {
+ case NVIF_OUTP_DETECT_V0_NOT_PRESENT: return NOT_PRESENT;
+ case NVIF_OUTP_DETECT_V0_PRESENT: return PRESENT;
+ case NVIF_OUTP_DETECT_V0_UNKNOWN: return UNKNOWN;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return UNKNOWN;
+}
+
void
nvif_outp_dtor(struct nvif_outp *outp)
{
@@ -231,6 +511,50 @@ nvif_outp_ctor(struct nvif_disp *disp, const char *name, int id, struct nvif_out
if (ret)
return ret;
+ outp->id = args.id;
+
+ switch (args.type) {
+ case NVIF_OUTP_V0_TYPE_DAC : outp->info.type = NVIF_OUTP_DAC; break;
+ case NVIF_OUTP_V0_TYPE_SOR : outp->info.type = NVIF_OUTP_SOR; break;
+ case NVIF_OUTP_V0_TYPE_PIOR: outp->info.type = NVIF_OUTP_PIOR; break;
+ break;
+ default:
+ WARN_ON(1);
+ nvif_outp_dtor(outp);
+ return -EINVAL;
+ }
+
+ switch (args.proto) {
+ case NVIF_OUTP_V0_PROTO_RGB_CRT:
+ outp->info.proto = NVIF_OUTP_RGB_CRT;
+ outp->info.rgb_crt.freq_max = args.rgb_crt.freq_max;
+ break;
+ case NVIF_OUTP_V0_PROTO_TMDS:
+ outp->info.proto = NVIF_OUTP_TMDS;
+ outp->info.tmds.dual = args.tmds.dual;
+ break;
+ case NVIF_OUTP_V0_PROTO_LVDS:
+ outp->info.proto = NVIF_OUTP_LVDS;
+ outp->info.lvds.acpi_edid = args.lvds.acpi_edid;
+ break;
+ case NVIF_OUTP_V0_PROTO_DP:
+ outp->info.proto = NVIF_OUTP_DP;
+ outp->info.dp.aux = args.dp.aux;
+ outp->info.dp.mst = args.dp.mst;
+ outp->info.dp.increased_wm = args.dp.increased_wm;
+ outp->info.dp.link_nr = args.dp.link_nr;
+ outp->info.dp.link_bw = args.dp.link_bw;
+ break;
+ default:
+ WARN_ON(1);
+ nvif_outp_dtor(outp);
+ return -EINVAL;
+ }
+
+ outp->info.heads = args.heads;
+ outp->info.ddc = args.ddc;
+ outp->info.conn = args.conn;
+
outp->or.id = -1;
return 0;
}
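
The single ACQUIRE-with-protocol method is replaced by smaller steps that the display driver sequences itself: acquire an OR, train the link, then program the SST (or MST) payload. A rough sketch of that ordering using only the helpers defined above; the wrapper, parameter values and error handling are illustrative assumptions, not the actual dispnv50 call sites, and the usual nvif/outp.h and DP header includes are assumed:

    static int
    example_dp_sst_modeset(struct nvif_outp *outp, int head,
                           u8 dpcd[DP_RECEIVER_CAP_SIZE], u8 link_nr, u32 link_bw,
                           u32 watermark, u32 hblanksym, u32 vblanksym)
    {
        int ret;

        ret = nvif_outp_acquire_sor(outp, false);   /* no HDA in this sketch */
        if (ret)
            return ret;

        /* Full link training: not a retrain, SST, no LTTPRs, no post-LT adjust. */
        ret = nvif_outp_dp_train(outp, dpcd, 0, link_nr, link_bw, false, false, false);
        if (ret == 0)
            ret = nvif_outp_dp_sst(outp, head, watermark, hblanksym, vblanksym);

        if (ret)
            nvif_outp_release(outp);
        return ret;
    }
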
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
index 91fb494d4009..374212da9e95 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/firmware.c
@@ -79,8 +79,7 @@ nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, int ver,
int i;
/* Convert device name to lowercase */
- strncpy(cname, device->chip->name, sizeof(cname));
- cname[sizeof(cname) - 1] = '\0';
+ strscpy(cname, device->chip->name, sizeof(cname));
i = strlen(cname);
while (i) {
--i;
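
strscpy() both bounds the copy and guarantees NUL termination, returning the copied length or -E2BIG on truncation, which is why the explicit terminator write could be dropped. A tiny illustration; the wrapper itself is hypothetical:

    static void
    example_copy_chip_name(struct nvkm_device *device, char *cname, size_t size)
    {
        /* Unlike strncpy(), strscpy() always terminates within 'size' and
         * reports truncation instead of silently leaving an unterminated buffer. */
        if (strscpy(cname, device->chip->name, size) < 0)
            pr_debug("chip name truncated\n");
    }
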
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
index c69daac9bac7..a705c2dfca80 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -140,12 +140,23 @@ nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
{
struct nvkm_instmem *imem = device->imem;
struct nvkm_memory *memory;
+ bool preserve = true;
int ret;
- if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
+ if (unlikely(!imem))
return -ENOSYS;
- ret = nvkm_instobj_new(imem, size, align, zero, &memory);
+ switch (target) {
+ case NVKM_MEM_TARGET_INST_SR_LOST:
+ preserve = false;
+ break;
+ case NVKM_MEM_TARGET_INST:
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ ret = nvkm_instobj_new(imem, size, align, zero, preserve, &memory);
if (ret)
return ret;
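
NVKM_MEM_TARGET_INST_SR_LOST is plain instance memory whose contents the core is allowed to drop across suspend/resume (it maps to preserve == false above), which is what lets large buffers skip the save/restore step. A sketch of such an allocation, assuming the usual size/align/zero argument order of nvkm_memory_new() and with the size and alignment values made up for illustration:

    static int
    example_alloc_volatile_inst(struct nvkm_device *device, struct nvkm_memory **pmemory)
    {
        /* Contents of this allocation may be lost across suspend/resume. */
        return nvkm_memory_new(device, NVKM_MEM_TARGET_INST_SR_LOST,
                               0x10000, 0x1000, true /* zero */, pmemory);
    }
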
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
index 73104b59f97f..39f7e7ce9f4a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/base.c
@@ -23,15 +23,12 @@
*/
#include "priv.h"
#include "conn.h"
-#include "dp.h"
#include "head.h"
#include "ior.h"
#include "outp.h"
#include <core/client.h>
#include <core/ramht.h>
-#include <subdev/bios.h>
-#include <subdev/bios/dcb.h>
#include <nvif/class.h>
#include <nvif/cl0046.h>
@@ -105,18 +102,14 @@ static int
nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
{
struct nvkm_disp *disp = nvkm_disp(engine);
- struct nvkm_conn *conn;
struct nvkm_outp *outp;
if (disp->func->fini)
disp->func->fini(disp);
list_for_each_entry(outp, &disp->outps, head) {
- nvkm_outp_fini(outp);
- }
-
- list_for_each_entry(conn, &disp->conns, head) {
- nvkm_conn_fini(conn);
+ if (outp->func->fini)
+ outp->func->fini(outp);
}
return 0;
@@ -126,16 +119,12 @@ static int
nvkm_disp_init(struct nvkm_engine *engine)
{
struct nvkm_disp *disp = nvkm_disp(engine);
- struct nvkm_conn *conn;
struct nvkm_outp *outp;
struct nvkm_ior *ior;
- list_for_each_entry(conn, &disp->conns, head) {
- nvkm_conn_init(conn);
- }
-
list_for_each_entry(outp, &disp->outps, head) {
- nvkm_outp_init(outp);
+ if (outp->func->init)
+ outp->func->init(outp);
}
if (disp->func->init) {
@@ -159,142 +148,15 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
{
struct nvkm_disp *disp = nvkm_disp(engine);
struct nvkm_subdev *subdev = &disp->engine.subdev;
- struct nvkm_bios *bios = subdev->device->bios;
- struct nvkm_outp *outp, *outt, *pair;
- struct nvkm_conn *conn;
struct nvkm_head *head;
- struct nvkm_ior *ior;
- struct nvbios_connE connE;
- struct dcb_output dcbE;
- u8 hpd = 0, ver, hdr;
- u32 data;
int ret, i;
- /* Create output path objects for each VBIOS display path. */
- i = -1;
- while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
- if (ver < 0x40) /* No support for chipsets prior to NV50. */
- break;
- if (dcbE.type == DCB_OUTPUT_UNUSED)
- continue;
- if (dcbE.type == DCB_OUTPUT_EOL)
- break;
- outp = NULL;
-
- switch (dcbE.type) {
- case DCB_OUTPUT_ANALOG:
- case DCB_OUTPUT_TV:
- case DCB_OUTPUT_TMDS:
- case DCB_OUTPUT_LVDS:
- ret = nvkm_outp_new(disp, i, &dcbE, &outp);
- break;
- case DCB_OUTPUT_DP:
- ret = nvkm_dp_new(disp, i, &dcbE, &outp);
- break;
- case DCB_OUTPUT_WFD:
- /* No support for WFD yet. */
- ret = -ENODEV;
- continue;
- default:
- nvkm_warn(subdev, "dcb %d type %d unknown\n",
- i, dcbE.type);
- continue;
- }
-
- if (ret) {
- if (outp) {
- if (ret != -ENODEV)
- OUTP_ERR(outp, "ctor failed: %d", ret);
- else
- OUTP_DBG(outp, "not supported");
- nvkm_outp_del(&outp);
- continue;
- }
- nvkm_error(subdev, "failed to create outp %d\n", i);
- continue;
- }
-
- list_add_tail(&outp->head, &disp->outps);
- hpd = max(hpd, (u8)(dcbE.connector + 1));
- }
-
- /* Create connector objects based on available output paths. */
- list_for_each_entry_safe(outp, outt, &disp->outps, head) {
- /* VBIOS data *should* give us the most useful information. */
- data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
- &connE);
-
- /* No bios connector data... */
- if (!data) {
- /* Heuristic: anything with the same ccb index is
- * considered to be on the same connector, any
- * output path without an associated ccb entry will
- * be put on its own connector.
- */
- int ccb_index = outp->info.i2c_index;
- if (ccb_index != 0xf) {
- list_for_each_entry(pair, &disp->outps, head) {
- if (pair->info.i2c_index == ccb_index) {
- outp->conn = pair->conn;
- break;
- }
- }
- }
-
- /* Connector shared with another output path. */
- if (outp->conn)
- continue;
-
- memset(&connE, 0x00, sizeof(connE));
- connE.type = DCB_CONNECTOR_NONE;
- i = -1;
- } else {
- i = outp->info.connector;
- }
-
- /* Check that we haven't already created this connector. */
- list_for_each_entry(conn, &disp->conns, head) {
- if (conn->index == outp->info.connector) {
- outp->conn = conn;
- break;
- }
- }
-
- if (outp->conn)
- continue;
-
- /* Apparently we need to create a new one! */
- ret = nvkm_conn_new(disp, i, &connE, &outp->conn);
- if (ret) {
- nvkm_error(subdev, "failed to create outp %d conn: %d\n", outp->index, ret);
- nvkm_conn_del(&outp->conn);
- list_del(&outp->head);
- nvkm_outp_del(&outp);
- continue;
- }
-
- list_add_tail(&outp->conn->head, &disp->conns);
- }
-
if (disp->func->oneinit) {
ret = disp->func->oneinit(disp);
if (ret)
return ret;
}
- /* Enforce identity-mapped SOR assignment for panels, which have
- * certain bits (ie. backlight controls) wired to a specific SOR.
- */
- list_for_each_entry(outp, &disp->outps, head) {
- if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
- outp->conn->info.type == DCB_CONNECTOR_eDP) {
- ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
- if (!WARN_ON(!ior))
- ior->identity = true;
- outp->identity = true;
- }
- }
-
i = 0;
list_for_each_entry(head, &disp->heads, head)
i = max(i, head->id + 1);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
index fbdae1137864..ff88a5a5253a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.c
@@ -30,16 +30,6 @@
#include <nvif/event.h>
void
-nvkm_conn_fini(struct nvkm_conn *conn)
-{
-}
-
-void
-nvkm_conn_init(struct nvkm_conn *conn)
-{
-}
-
-void
nvkm_conn_del(struct nvkm_conn **pconn)
{
struct nvkm_conn *conn = *pconn;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
index a0600e72b0ec..01c3146c7066 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/conn.h
@@ -19,8 +19,6 @@ struct nvkm_conn {
int nvkm_conn_new(struct nvkm_disp *, int index, struct nvbios_connE *,
struct nvkm_conn **);
void nvkm_conn_del(struct nvkm_conn **);
-void nvkm_conn_init(struct nvkm_conn *);
-void nvkm_conn_fini(struct nvkm_conn *);
#define CONN_MSG(c,l,f,a...) do { \
struct nvkm_conn *_conn = (c); \
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
index b8ac66b4a2c4..a109348bd63b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dp.c
@@ -41,6 +41,40 @@
*/
#define AMPERE_IED_HACK(disp) ((disp)->engine.subdev.device->card_type >= GA100)
+static int
+nvkm_dp_mst_id_put(struct nvkm_outp *outp, u32 id)
+{
+ return 0;
+}
+
+static int
+nvkm_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid)
+{
+ *pid = BIT(outp->index);
+ return 0;
+}
+
+static int
+nvkm_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *size)
+{
+ int ret = nvkm_i2c_aux_acquire(outp->dp.aux);
+
+ if (ret)
+ return ret;
+
+ ret = nvkm_i2c_aux_xfer(outp->dp.aux, false, type, addr, data, size);
+ nvkm_i2c_aux_release(outp->dp.aux);
+ return ret;
+}
+
+static int
+nvkm_dp_aux_pwr(struct nvkm_outp *outp, bool pu)
+{
+ outp->dp.enabled = pu;
+ nvkm_dp_enable(outp, outp->dp.enabled);
+ return 0;
+}
+
struct lt_state {
struct nvkm_outp *outp;
@@ -282,31 +316,20 @@ nvkm_dp_train_link(struct nvkm_outp *outp, int rate)
struct lt_state lt = {
.outp = outp,
.pc2 = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED,
+ .repeaters = outp->dp.lttprs,
};
- u8 sink[2], data;
+ u8 sink[2];
int ret;
OUTP_DBG(outp, "training %dx%02x", ior->dp.nr, ior->dp.bw);
- /* Select LTTPR non-transparent mode if we have a valid configuration,
- * use transparent mode otherwise.
- */
- if (outp->dp.lttpr[0] >= 0x14) {
- data = DPCD_LTTPR_MODE_TRANSPARENT;
- nvkm_wraux(outp->dp.aux, DPCD_LTTPR_MODE, &data, sizeof(data));
-
- if (outp->dp.lttprs) {
- data = DPCD_LTTPR_MODE_NON_TRANSPARENT;
- nvkm_wraux(outp->dp.aux, DPCD_LTTPR_MODE, &data, sizeof(data));
- lt.repeaters = outp->dp.lttprs;
- }
- }
-
/* Set desired link configuration on the sink. */
sink[0] = (outp->dp.rate[rate].dpcd < 0) ? ior->dp.bw : 0;
sink[1] = ior->dp.nr;
if (ior->dp.ef)
sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
+ if (outp->dp.lt.post_adj)
+ sink[1] |= 0x20;
ret = nvkm_wraux(outp->dp.aux, DPCD_LC00_LINK_BW_SET, sink, 2);
if (ret)
@@ -447,71 +470,58 @@ nvkm_dp_train_init(struct nvkm_outp *outp)
}
static int
-nvkm_dp_train(struct nvkm_outp *outp, u32 dataKBps)
+nvkm_dp_drive(struct nvkm_outp *outp, u8 lanes, u8 pe[4], u8 vs[4])
+{
+ struct lt_state lt = {
+ .outp = outp,
+ .stat[4] = (pe[0] << 2) | (vs[0] << 0) |
+ (pe[1] << 6) | (vs[1] << 4),
+ .stat[5] = (pe[2] << 2) | (vs[2] << 0) |
+ (pe[3] << 6) | (vs[3] << 4),
+ };
+
+ return nvkm_dp_train_drive(&lt, false);
+}
+
+static int
+nvkm_dp_train(struct nvkm_outp *outp, bool retrain)
{
struct nvkm_ior *ior = outp->ior;
- int ret = -EINVAL, nr, rate;
- u8 pwr;
+ int ret, rate;
- /* Retraining link? Skip source configuration, it can mess up the active modeset. */
- if (atomic_read(&outp->dp.lt.done)) {
- for (rate = 0; rate < outp->dp.rates; rate++) {
- if (outp->dp.rate[rate].rate == ior->dp.bw * 27000)
- return nvkm_dp_train_link(outp, ret);
- }
- WARN_ON(1);
- return -EINVAL;
+ for (rate = 0; rate < outp->dp.rates; rate++) {
+ if (outp->dp.rate[rate].rate == (retrain ? ior->dp.bw : outp->dp.lt.bw) * 27000)
+ break;
}
- /* Ensure sink is not in a low-power state. */
- if (!nvkm_rdaux(outp->dp.aux, DPCD_SC00, &pwr, 1)) {
- if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
- pwr &= ~DPCD_SC00_SET_POWER;
- pwr |= DPCD_SC00_SET_POWER_D0;
- nvkm_wraux(outp->dp.aux, DPCD_SC00, &pwr, 1);
- }
+ if (WARN_ON(rate == outp->dp.rates))
+ return -EINVAL;
+
+ /* Retraining link? Skip source configuration, it can mess up the active modeset. */
+ if (retrain) {
+ mutex_lock(&outp->dp.mutex);
+ ret = nvkm_dp_train_link(outp, rate);
+ mutex_unlock(&outp->dp.mutex);
+ return ret;
}
+ mutex_lock(&outp->dp.mutex);
+ OUTP_DBG(outp, "training");
+
ior->dp.mst = outp->dp.lt.mst;
ior->dp.ef = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP;
- ior->dp.nr = 0;
+ ior->dp.bw = outp->dp.lt.bw;
+ ior->dp.nr = outp->dp.lt.nr;
- /* Link training. */
- OUTP_DBG(outp, "training");
nvkm_dp_train_init(outp);
-
- /* Validate and train at configuration requested (if any) on ACQUIRE. */
- if (outp->dp.lt.nr) {
- for (nr = outp->dp.links; ret < 0 && nr; nr >>= 1) {
- for (rate = 0; nr == outp->dp.lt.nr && rate < outp->dp.rates; rate++) {
- if (outp->dp.rate[rate].rate / 27000 == outp->dp.lt.bw) {
- ior->dp.bw = outp->dp.rate[rate].rate / 27000;
- ior->dp.nr = nr;
- ret = nvkm_dp_train_links(outp, rate);
- }
- }
- }
- }
-
- /* Otherwise, loop through all valid link configurations that support the data rate. */
- for (nr = outp->dp.links; ret < 0 && nr; nr >>= 1) {
- for (rate = 0; ret < 0 && rate < outp->dp.rates; rate++) {
- if (outp->dp.rate[rate].rate * nr >= dataKBps || WARN_ON(!ior->dp.nr)) {
- /* Program selected link configuration. */
- ior->dp.bw = outp->dp.rate[rate].rate / 27000;
- ior->dp.nr = nr;
- ret = nvkm_dp_train_links(outp, rate);
- }
- }
- }
-
- /* Finish up. */
+ ret = nvkm_dp_train_links(outp, rate);
nvkm_dp_train_fini(outp);
if (ret < 0)
OUTP_ERR(outp, "training failed");
else
OUTP_DBG(outp, "training done");
- atomic_set(&outp->dp.lt.done, 1);
+
+ mutex_unlock(&outp->dp.mutex);
return ret;
}
@@ -529,155 +539,10 @@ nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)
static void
nvkm_dp_release(struct nvkm_outp *outp)
{
- /* Prevent link from being retrained if sink sends an IRQ. */
- atomic_set(&outp->dp.lt.done, 0);
outp->ior->dp.nr = 0;
-}
-
-static int
-nvkm_dp_acquire(struct nvkm_outp *outp)
-{
- struct nvkm_ior *ior = outp->ior;
- struct nvkm_head *head;
- bool retrain = true;
- u32 datakbps = 0;
- u32 dataKBps;
- u32 linkKBps;
- u8 stat[3];
- int ret, i;
-
- mutex_lock(&outp->dp.mutex);
-
- /* Check that link configuration meets current requirements. */
- list_for_each_entry(head, &outp->disp->heads, head) {
- if (ior->asy.head & (1 << head->id)) {
- u32 khz = (head->asy.hz >> ior->asy.rgdiv) / 1000;
- datakbps += khz * head->asy.or.depth;
- }
- }
-
- linkKBps = ior->dp.bw * 27000 * ior->dp.nr;
- dataKBps = DIV_ROUND_UP(datakbps, 8);
- OUTP_DBG(outp, "data %d KB/s link %d KB/s mst %d->%d",
- dataKBps, linkKBps, ior->dp.mst, outp->dp.lt.mst);
- if (linkKBps < dataKBps || ior->dp.mst != outp->dp.lt.mst) {
- OUTP_DBG(outp, "link requirements changed");
- goto done;
- }
-
- /* Check that link is still trained. */
- ret = nvkm_rdaux(outp->dp.aux, DPCD_LS02, stat, 3);
- if (ret) {
- OUTP_DBG(outp, "failed to read link status, assuming no sink");
- goto done;
- }
-
- if (stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE) {
- for (i = 0; i < ior->dp.nr; i++) {
- u8 lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0x0f;
- if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
- !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
- !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
- OUTP_DBG(outp, "lane %d not equalised", lane);
- goto done;
- }
- }
- retrain = false;
- } else {
- OUTP_DBG(outp, "no inter-lane alignment");
- }
-
-done:
- if (retrain || !atomic_read(&outp->dp.lt.done))
- ret = nvkm_dp_train(outp, dataKBps);
- mutex_unlock(&outp->dp.mutex);
- return ret;
-}
-
-static bool
-nvkm_dp_enable_supported_link_rates(struct nvkm_outp *outp)
-{
- u8 sink_rates[DPCD_RC10_SUPPORTED_LINK_RATES__SIZE];
- int i, j, k;
-
- if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
- outp->dp.dpcd[DPCD_RC00_DPCD_REV] < 0x13 ||
- nvkm_rdaux(outp->dp.aux, DPCD_RC10_SUPPORTED_LINK_RATES(0),
- sink_rates, sizeof(sink_rates)))
- return false;
-
- for (i = 0; i < ARRAY_SIZE(sink_rates); i += 2) {
- const u32 rate = ((sink_rates[i + 1] << 8) | sink_rates[i]) * 200 / 10;
-
- if (!rate || WARN_ON(outp->dp.rates == ARRAY_SIZE(outp->dp.rate)))
- break;
-
- if (rate > outp->info.dpconf.link_bw * 27000) {
- OUTP_DBG(outp, "rate %d !outp", rate);
- continue;
- }
-
- for (j = 0; j < outp->dp.rates; j++) {
- if (rate > outp->dp.rate[j].rate) {
- for (k = outp->dp.rates; k > j; k--)
- outp->dp.rate[k] = outp->dp.rate[k - 1];
- break;
- }
- }
-
- outp->dp.rate[j].dpcd = i / 2;
- outp->dp.rate[j].rate = rate;
- outp->dp.rates++;
- }
-
- for (i = 0; i < outp->dp.rates; i++)
- OUTP_DBG(outp, "link_rate[%d] = %d", outp->dp.rate[i].dpcd, outp->dp.rate[i].rate);
+ nvkm_dp_disable(outp, outp->ior);
- return outp->dp.rates != 0;
-}
-
-/* XXX: This is a big fat hack, and this is just drm_dp_read_dpcd_caps()
- * converted to work inside nvkm. This is a temporary holdover until we start
- * passing the drm_dp_aux device through NVKM
- */
-static int
-nvkm_dp_read_dpcd_caps(struct nvkm_outp *outp)
-{
- struct nvkm_i2c_aux *aux = outp->dp.aux;
- u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
- int ret;
-
- ret = nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, DP_RECEIVER_CAP_SIZE);
- if (ret < 0)
- return ret;
-
- /*
- * Prior to DP1.3 the bit represented by
- * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
- * If it is set DP_DPCD_REV at 0000h could be at a value less than
- * the true capability of the panel. The only way to check is to
- * then compare 0000h and 2200h.
- */
- if (!(outp->dp.dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
- DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
- return 0;
-
- ret = nvkm_rdaux(aux, DP_DP13_DPCD_REV, dpcd_ext, sizeof(dpcd_ext));
- if (ret < 0)
- return ret;
-
- if (outp->dp.dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
- OUTP_DBG(outp, "Extended DPCD rev less than base DPCD rev (%d > %d)\n",
- outp->dp.dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]);
- return 0;
- }
-
- if (!memcmp(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext)))
- return 0;
-
- memcpy(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext));
-
- return 0;
+ nvkm_outp_release(outp);
}
void
@@ -711,66 +576,11 @@ nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr)
OUTP_DBG(outp, "aux power -> always");
nvkm_i2c_aux_monitor(aux, true);
outp->dp.aux_pwr = true;
-
- /* Detect any LTTPRs before reading DPCD receiver caps. */
- if (!nvkm_rdaux(aux, DPCD_LTTPR_REV, outp->dp.lttpr, sizeof(outp->dp.lttpr)) &&
- outp->dp.lttpr[0] >= 0x14 && outp->dp.lttpr[2]) {
- switch (outp->dp.lttpr[2]) {
- case 0x80: outp->dp.lttprs = 1; break;
- case 0x40: outp->dp.lttprs = 2; break;
- case 0x20: outp->dp.lttprs = 3; break;
- case 0x10: outp->dp.lttprs = 4; break;
- case 0x08: outp->dp.lttprs = 5; break;
- case 0x04: outp->dp.lttprs = 6; break;
- case 0x02: outp->dp.lttprs = 7; break;
- case 0x01: outp->dp.lttprs = 8; break;
- default:
- /* Unknown LTTPR count, we'll switch to transparent mode. */
- WARN_ON(1);
- outp->dp.lttprs = 0;
- break;
- }
- } else {
- /* No LTTPR support, or zero LTTPR count - don't touch it at all. */
- memset(outp->dp.lttpr, 0x00, sizeof(outp->dp.lttpr));
- }
-
- if (!nvkm_dp_read_dpcd_caps(outp)) {
- const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
- const u8 *rate;
- int rate_max;
-
- outp->dp.rates = 0;
- outp->dp.links = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT;
- outp->dp.links = min(outp->dp.links, outp->info.dpconf.link_nr);
- if (outp->dp.lttprs && outp->dp.lttpr[4])
- outp->dp.links = min_t(int, outp->dp.links, outp->dp.lttpr[4]);
-
- rate_max = outp->dp.dpcd[DPCD_RC01_MAX_LINK_RATE];
- rate_max = min(rate_max, outp->info.dpconf.link_bw);
- if (outp->dp.lttprs && outp->dp.lttpr[1])
- rate_max = min_t(int, rate_max, outp->dp.lttpr[1]);
-
- if (!nvkm_dp_enable_supported_link_rates(outp)) {
- for (rate = rates; *rate; rate++) {
- if (*rate > rate_max)
- continue;
-
- if (WARN_ON(outp->dp.rates == ARRAY_SIZE(outp->dp.rate)))
- break;
-
- outp->dp.rate[outp->dp.rates].dpcd = -1;
- outp->dp.rate[outp->dp.rates].rate = *rate * 27000;
- outp->dp.rates++;
- }
- }
- }
} else
if (!auxpwr && outp->dp.aux_pwr) {
OUTP_DBG(outp, "aux power -> demand");
nvkm_i2c_aux_monitor(aux, false);
outp->dp.aux_pwr = false;
- atomic_set(&outp->dp.lt.done, 0);
/* Restore eDP panel GPIO to its prior state if we changed it, as
* it could potentially interfere with other outputs.
@@ -793,6 +603,7 @@ nvkm_dp_fini(struct nvkm_outp *outp)
static void
nvkm_dp_init(struct nvkm_outp *outp)
{
+ nvkm_outp_init(outp);
nvkm_dp_enable(outp, outp->dp.enabled);
}
@@ -807,9 +618,18 @@ nvkm_dp_func = {
.dtor = nvkm_dp_dtor,
.init = nvkm_dp_init,
.fini = nvkm_dp_fini,
- .acquire = nvkm_dp_acquire,
+ .detect = nvkm_outp_detect,
+ .inherit = nvkm_outp_inherit,
+ .acquire = nvkm_outp_acquire,
.release = nvkm_dp_release,
- .disable = nvkm_dp_disable,
+ .bl.get = nvkm_outp_bl_get,
+ .bl.set = nvkm_outp_bl_set,
+ .dp.aux_pwr = nvkm_dp_aux_pwr,
+ .dp.aux_xfer = nvkm_dp_aux_xfer,
+ .dp.train = nvkm_dp_train,
+ .dp.drive = nvkm_dp_drive,
+ .dp.mst_id_get = nvkm_dp_mst_id_get,
+ .dp.mst_id_put = nvkm_dp_mst_id_put,
};
int
@@ -819,7 +639,7 @@ nvkm_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE, struct n
struct nvkm_bios *bios = device->bios;
struct nvkm_i2c *i2c = device->i2c;
struct nvkm_outp *outp;
- u8 hdr, cnt, len;
+ u8 ver, hdr, cnt, len;
u32 data;
int ret;
@@ -847,7 +667,9 @@ nvkm_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE, struct n
OUTP_DBG(outp, "bios dp %02x %02x %02x %02x", outp->dp.version, hdr, cnt, len);
+ data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
+ outp->dp.mst = data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04);
+
mutex_init(&outp->dp.mutex);
- atomic_set(&outp->dp.lt.done, 0);
return 0;
}
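
nvkm_dp_drive() packs the requested per-lane drive levels straight into the lt_state status bytes, two lanes per byte with voltage swing in the low bit pair and pre-emphasis in the next, mirroring the DPCD ADJUST_REQUEST layout. The same packing pulled out as a standalone sketch:

    static void
    example_pack_drive_levels(const u8 pe[4], const u8 vs[4], u8 stat[6])
    {
        /* stat[4] covers lanes 0/1, stat[5] lanes 2/3, as later consumed by
         * nvkm_dp_train_drive(). */
        stat[4] = (pe[0] << 2) | (vs[0] << 0) | (pe[1] << 6) | (vs[1] << 4);
        stat[5] = (pe[2] << 2) | (vs[2] << 0) | (pe[3] << 6) | (vs[3] << 4);
    }
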
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
index 23ae451ba473..1be97a68a83e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g84.c
@@ -124,6 +124,7 @@ g84_sor = {
.state = nv50_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
+ .bl = &nv50_sor_bl,
.hdmi = &g84_sor_hdmi,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
index 67ef889a0c5f..843a2661ce9d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/g94.c
@@ -295,6 +295,7 @@ g94_sor = {
.clock = nv50_sor_clock,
.war_2 = g94_sor_war_2,
.war_3 = g94_sor_war_3,
+ .bl = &nv50_sor_bl,
.hdmi = &g84_sor_hdmi,
.dp = &g94_sor_dp,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
index 52099b75f52a..efe66ba3c61f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ga102.c
@@ -105,6 +105,7 @@ ga102_sor = {
.state = gv100_sor_state,
.power = nv50_sor_power,
.clock = ga102_sor_clock,
+ .bl = &gt215_sor_bl,
.hdmi = &gv100_sor_hdmi,
.dp = &ga102_sor_dp,
.hda = &gv100_sor_hda,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index a48e9bdf4cd0..b48ead31da30 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -328,6 +328,7 @@ gf119_sor = {
.state = gf119_sor_state,
.power = nv50_sor_power,
.clock = gf119_sor_clock,
+ .bl = &gt215_sor_bl,
.hdmi = &gf119_sor_hdmi,
.dp = &gf119_sor_dp,
.hda = &gf119_sor_hda,
@@ -1038,7 +1039,6 @@ gf119_disp_super(struct work_struct *work)
continue;
nv50_disp_super_2_0(disp, head);
}
- nvkm_outp_route(disp);
list_for_each_entry(head, &disp->heads, head) {
if (!(mask[head->id] & 0x00010000))
continue;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index 876a21a0cebb..a3e2fbadade4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -115,6 +115,7 @@ gk104_sor = {
.state = gf119_sor_state,
.power = nv50_sor_power,
.clock = gf119_sor_clock,
+ .bl = &gt215_sor_bl,
.hdmi = &gk104_sor_hdmi,
.dp = &gf119_sor_dp,
.hda = &gf119_sor_hda,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index b4d8e868616f..688e123ad482 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -70,6 +70,7 @@ gm107_sor = {
.state = gf119_sor_state,
.power = nv50_sor_power,
.clock = gf119_sor_clock,
+ .bl = &gt215_sor_bl,
.hdmi = &gk104_sor_hdmi,
.dp = &gm107_sor_dp,
.hda = &gf119_sor_hda,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
index 562ebae57d44..511e7831b2f5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
@@ -68,15 +68,23 @@ gm200_sor_dp = {
};
void
-gm200_sor_hdmi_scdc(struct nvkm_ior *ior, u8 scdc)
+gm200_sor_hdmi_scdc(struct nvkm_ior *ior, u32 khz, bool support, bool scrambling,
+ bool scrambling_low_rates)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 soff = nv50_ior_base(ior);
- const u32 ctrl = scdc & 0x3;
+ u32 ctrl = 0;
- nvkm_mask(device, 0x61c5bc + soff, 0x00000003, ctrl);
+ ior->tmds.high_speed = khz > 340000;
+
+ if (support && scrambling) {
+ if (ior->tmds.high_speed)
+ ctrl |= 0x00000002;
+ if (ior->tmds.high_speed || scrambling_low_rates)
+ ctrl |= 0x00000001;
+ }
- ior->tmds.high_speed = !!(scdc & 0x2);
+ nvkm_mask(device, 0x61c5bc + soff, 0x00000003, ctrl);
}
const struct nvkm_ior_func_hdmi
@@ -139,6 +147,7 @@ gm200_sor = {
.state = gf119_sor_state,
.power = nv50_sor_power,
.clock = gf119_sor_clock,
+ .bl = &gt215_sor_bl,
.hdmi = &gm200_sor_hdmi,
.dp = &gm200_sor_dp,
.hda = &gf119_sor_hda,
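
gm200_sor_hdmi_scdc() now derives the control bits from the pixel clock and sink capabilities rather than taking a pre-computed mask: bit 1 selects the high TMDS bit-clock ratio above 340 MHz, bit 0 enables scrambling. The same decision as a standalone sketch; the example rates in the comment are purely illustrative:

    static u32
    example_scdc_ctrl(u32 khz, bool support, bool scrambling, bool scrambling_low_rates)
    {
        bool high_speed = khz > 340000;
        u32 ctrl = 0;

        if (support && scrambling) {
            if (high_speed)
                ctrl |= 0x00000002;     /* high bit-clock ratio */
            if (high_speed || scrambling_low_rates)
                ctrl |= 0x00000001;     /* scrambling enable */
        }
        return ctrl;    /* e.g. 594000 kHz -> 0x3; 148500 kHz -> 0x0, or 0x1 with low-rate scrambling */
    }
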
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
index 7f1eb4332040..4070447bd800 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
@@ -37,6 +37,7 @@ gp100_sor = {
.state = gf119_sor_state,
.power = nv50_sor_power,
.clock = gf119_sor_clock,
+ .bl = &gt215_sor_bl,
.hdmi = &gm200_sor_hdmi,
.dp = &gm200_sor_dp,
.hda = &gf119_sor_hda,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
index 506ffbe7b842..6318721b66f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gt215.c
@@ -182,11 +182,49 @@ gt215_sor_hdmi = {
.infoframe_vsi = gt215_sor_hdmi_infoframe_vsi,
};
+static int
+gt215_sor_bl_set(struct nvkm_ior *ior, int lvl)
+{
+ struct nvkm_device *device = ior->disp->engine.subdev.device;
+ const u32 soff = nv50_ior_base(ior);
+ u32 div, val;
+
+ div = nvkm_rd32(device, 0x61c080 + soff);
+ val = (lvl * div) / 100;
+ if (div)
+ nvkm_wr32(device, 0x61c084 + soff, 0xc0000000 | val);
+
+ return 0;
+}
+
+static int
+gt215_sor_bl_get(struct nvkm_ior *ior)
+{
+ struct nvkm_device *device = ior->disp->engine.subdev.device;
+ const u32 soff = nv50_ior_base(ior);
+ u32 div, val;
+
+ div = nvkm_rd32(device, 0x61c080 + soff);
+ val = nvkm_rd32(device, 0x61c084 + soff);
+ val &= 0x00ffffff;
+ if (div && div >= val)
+ return ((val * 100) + (div / 2)) / div;
+
+ return 100;
+}
+
+const struct nvkm_ior_func_bl
+gt215_sor_bl = {
+ .get = gt215_sor_bl_get,
+ .set = gt215_sor_bl_set,
+};
+
static const struct nvkm_ior_func
gt215_sor = {
.state = g94_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
+ .bl = &gt215_sor_bl,
.hdmi = &gt215_sor_hdmi,
.dp = &gt215_sor_dp,
.hda = &gt215_sor_hda,
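
The GT215 backlight callbacks scale between a 0-100 percentage and the PWM divider read back from 0x61c080: set programs level * div / 100, and get rounds the readback to the nearest percent. The rounding as a standalone sketch; the sample values in the comment are made up:

    static int
    example_bl_percent(u32 div, u32 val)
    {
        /* Same maths as gt215_sor_bl_get(): e.g. div=1000, val=505 -> 51%. */
        if (div && div >= val)
            return ((val * 100) + (div / 2)) / div;
        return 100;
    }
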
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
index 4ebc030e40d1..e1634f7bca56 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
@@ -212,6 +212,7 @@ gv100_sor = {
.state = gv100_sor_state,
.power = nv50_sor_power,
.clock = gf119_sor_clock,
+ .bl = &gt215_sor_bl,
.hdmi = &gv100_sor_hdmi,
.dp = &gv100_sor_dp,
.hda = &gv100_sor_hda,
@@ -863,7 +864,6 @@ gv100_disp_super(struct work_struct *work)
continue;
nv50_disp_super_2_0(disp, head);
}
- nvkm_outp_route(disp);
list_for_each_entry(head, &disp->heads, head) {
if (!(mask[head->id] & 0x00010000))
continue;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index da1b1a626ef2..9beb9d1e8633 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -63,11 +63,18 @@ struct nvkm_ior_func {
void (*war_2)(struct nvkm_ior *);
void (*war_3)(struct nvkm_ior *);
+ const struct nvkm_ior_func_bl {
+ int (*get)(struct nvkm_ior *);
+ int (*set)(struct nvkm_ior *, int lvl);
+ } *bl;
+
const struct nvkm_ior_func_hdmi {
void (*ctrl)(struct nvkm_ior *, int head, bool enable, u8 max_ac_packet, u8 rekey);
- void (*scdc)(struct nvkm_ior *, u8 scdc);
+ void (*scdc)(struct nvkm_ior *, u32 khz, bool support, bool scrambling,
+ bool scrambling_low_rates);
void (*infoframe_avi)(struct nvkm_ior *, int head, void *data, u32 size);
void (*infoframe_vsi)(struct nvkm_ior *, int head, void *data, u32 size);
+ void (*audio)(struct nvkm_ior *, int head, bool enable);
} *hdmi;
const struct nvkm_ior_func_dp {
@@ -77,6 +84,8 @@ struct nvkm_ior_func {
void (*pattern)(struct nvkm_ior *, int pattern);
void (*drive)(struct nvkm_ior *, int ln, int pc,
int dc, int pe, int tx_pu);
+ int (*sst)(struct nvkm_ior *, int head, bool ef,
+ u32 watermark, u32 hblanksym, u32 vblanksym);
void (*vcpi)(struct nvkm_ior *, int head, u8 slot,
u8 slot_nr, u16 pbn, u16 aligned);
void (*audio)(struct nvkm_ior *, int head, bool enable);
@@ -122,6 +131,7 @@ int nv50_sor_cnt(struct nvkm_disp *, unsigned long *);
void nv50_sor_state(struct nvkm_ior *, struct nvkm_ior_state *);
void nv50_sor_power(struct nvkm_ior *, bool, bool, bool, bool, bool);
void nv50_sor_clock(struct nvkm_ior *);
+extern const struct nvkm_ior_func_bl nv50_sor_bl;
int g84_sor_new(struct nvkm_disp *, int);
extern const struct nvkm_ior_func_hdmi g84_sor_hdmi;
@@ -138,6 +148,7 @@ void g94_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
void g94_sor_dp_activesym(struct nvkm_ior *, int, u8, u8, u8, u8);
void g94_sor_dp_watermark(struct nvkm_ior *, int, u8);
+extern const struct nvkm_ior_func_bl gt215_sor_bl;
extern const struct nvkm_ior_func_hdmi gt215_sor_hdmi;
void gt215_sor_dp_audio(struct nvkm_ior *, int, bool);
extern const struct nvkm_ior_func_hda gt215_sor_hda;
@@ -167,7 +178,7 @@ void gm107_sor_dp_pattern(struct nvkm_ior *, int);
void gm200_sor_route_set(struct nvkm_outp *, struct nvkm_ior *);
int gm200_sor_route_get(struct nvkm_outp *, int *);
extern const struct nvkm_ior_func_hdmi gm200_sor_hdmi;
-void gm200_sor_hdmi_scdc(struct nvkm_ior *, u8);
+void gm200_sor_hdmi_scdc(struct nvkm_ior *, u32, bool, bool, bool);
extern const struct nvkm_ior_func_dp gm200_sor_dp;
void gm200_sor_dp_drive(struct nvkm_ior *, int, int, int, int, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
index f96ba4752655..e0c5fb6df3d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/mcp89.c
@@ -44,6 +44,7 @@ mcp89_sor = {
.state = g94_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
+ .bl = &gt215_sor_bl,
.hdmi = &gt215_sor_hdmi,
.dp = &mcp89_sor_dp,
.hda = &gt215_sor_hda,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index be8116802960..2d05e2f7e46b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -23,7 +23,9 @@
*/
#include "priv.h"
#include "chan.h"
+#include "conn.h"
#include "head.h"
+#include "dp.h"
#include "ior.h"
#include "outp.h"
@@ -156,6 +158,37 @@ nv50_pior_cnt(struct nvkm_disp *disp, unsigned long *pmask)
return 3;
}
+static int
+nv50_sor_bl_set(struct nvkm_ior *ior, int lvl)
+{
+ struct nvkm_device *device = ior->disp->engine.subdev.device;
+ const u32 soff = nv50_ior_base(ior);
+ u32 div = 1025;
+ u32 val = (lvl * div) / 100;
+
+ nvkm_wr32(device, 0x61c084 + soff, 0x80000000 | val);
+ return 0;
+}
+
+static int
+nv50_sor_bl_get(struct nvkm_ior *ior)
+{
+ struct nvkm_device *device = ior->disp->engine.subdev.device;
+ const u32 soff = nv50_ior_base(ior);
+ u32 div = 1025;
+ u32 val;
+
+ val = nvkm_rd32(device, 0x61c084 + soff);
+ val &= 0x000007ff;
+ return ((val * 100) + (div / 2)) / div;
+}
+
+const struct nvkm_ior_func_bl
+nv50_sor_bl = {
+ .get = nv50_sor_bl_get,
+ .set = nv50_sor_bl_set,
+};
+
void
nv50_sor_clock(struct nvkm_ior *sor)
{
@@ -220,6 +253,7 @@ nv50_sor = {
.state = nv50_sor_state,
.power = nv50_sor_power,
.clock = nv50_sor_clock,
+ .bl = &nv50_sor_bl,
};
static int
@@ -1254,10 +1288,6 @@ nv50_disp_super_2_2(struct nvkm_disp *disp, struct nvkm_head *head)
ior->asy.link = outp->lvds.dual ? 3 : 1;
}
- /* Handle any link training, etc. */
- if (outp && outp->func->acquire)
- outp->func->acquire(outp);
-
/* Execute OnInt2 IED script. */
nv50_disp_super_ied_on(head, ior, 0, khz);
@@ -1287,7 +1317,6 @@ nv50_disp_super_2_1(struct nvkm_disp *disp, struct nvkm_head *head)
void
nv50_disp_super_2_0(struct nvkm_disp *disp, struct nvkm_head *head)
{
- struct nvkm_outp *outp;
struct nvkm_ior *ior;
/* Determine which OR, if any, we're detaching from the head. */
@@ -1298,14 +1327,6 @@ nv50_disp_super_2_0(struct nvkm_disp *disp, struct nvkm_head *head)
/* Execute OffInt2 IED script. */
nv50_disp_super_ied_off(head, ior, 2);
-
- /* If we're shutting down the OR's only active head, execute
- * the output path's disable function.
- */
- if (ior->arm.head == (1 << head->id)) {
- if ((outp = ior->arm.outp) && outp->func->disable)
- outp->func->disable(outp, ior);
- }
}
void
@@ -1371,7 +1392,6 @@ nv50_disp_super(struct work_struct *work)
continue;
nv50_disp_super_2_0(disp, head);
}
- nvkm_outp_route(disp);
list_for_each_entry(head, &disp->heads, head) {
if (!(super & (0x00000200 << head->id)))
continue;
@@ -1563,7 +1583,15 @@ nv50_disp_oneinit(struct nvkm_disp *disp)
const struct nvkm_disp_func *func = disp->func;
struct nvkm_subdev *subdev = &disp->engine.subdev;
struct nvkm_device *device = subdev->device;
+ struct nvkm_bios *bios = device->bios;
+ struct nvkm_outp *outp, *outt, *pair;
+ struct nvkm_conn *conn;
+ struct nvkm_ior *ior;
int ret, i;
+ u8 ver, hdr;
+ u32 data;
+ struct dcb_output dcbE;
+ struct nvbios_connE connE;
if (func->wndw.cnt) {
disp->wndw.nr = func->wndw.cnt(disp, &disp->wndw.mask);
@@ -1610,8 +1638,130 @@ nv50_disp_oneinit(struct nvkm_disp *disp)
if (ret)
return ret;
- return nvkm_ramht_new(device, func->ramht_size ? func->ramht_size :
- 0x1000, 0, disp->inst, &disp->ramht);
+ ret = nvkm_ramht_new(device, func->ramht_size ? func->ramht_size : 0x1000, 0, disp->inst,
+ &disp->ramht);
+ if (ret)
+ return ret;
+
+ /* Create output path objects for each VBIOS display path. */
+ i = -1;
+ while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+ if (WARN_ON((ver & 0xf0) != 0x40))
+ return -EINVAL;
+ if (dcbE.type == DCB_OUTPUT_UNUSED)
+ continue;
+ if (dcbE.type == DCB_OUTPUT_EOL)
+ break;
+ outp = NULL;
+
+ switch (dcbE.type) {
+ case DCB_OUTPUT_ANALOG:
+ case DCB_OUTPUT_TMDS:
+ case DCB_OUTPUT_LVDS:
+ ret = nvkm_outp_new(disp, i, &dcbE, &outp);
+ break;
+ case DCB_OUTPUT_DP:
+ ret = nvkm_dp_new(disp, i, &dcbE, &outp);
+ break;
+ case DCB_OUTPUT_TV:
+ case DCB_OUTPUT_WFD:
+ /* No support for WFD yet. */
+ ret = -ENODEV;
+ continue;
+ default:
+ nvkm_warn(subdev, "dcb %d type %d unknown\n",
+ i, dcbE.type);
+ continue;
+ }
+
+ if (ret) {
+ if (outp) {
+ if (ret != -ENODEV)
+ OUTP_ERR(outp, "ctor failed: %d", ret);
+ else
+ OUTP_DBG(outp, "not supported");
+ nvkm_outp_del(&outp);
+ continue;
+ }
+ nvkm_error(subdev, "failed to create outp %d\n", i);
+ continue;
+ }
+
+ list_add_tail(&outp->head, &disp->outps);
+ }
+
+ /* Create connector objects based on available output paths. */
+ list_for_each_entry_safe(outp, outt, &disp->outps, head) {
+ /* VBIOS data *should* give us the most useful information. */
+ data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
+ &connE);
+
+ /* No bios connector data... */
+ if (!data) {
+ /* Heuristic: anything with the same ccb index is
+ * considered to be on the same connector, any
+ * output path without an associated ccb entry will
+ * be put on its own connector.
+ */
+ int ccb_index = outp->info.i2c_index;
+ if (ccb_index != 0xf) {
+ list_for_each_entry(pair, &disp->outps, head) {
+ if (pair->info.i2c_index == ccb_index) {
+ outp->conn = pair->conn;
+ break;
+ }
+ }
+ }
+
+ /* Connector shared with another output path. */
+ if (outp->conn)
+ continue;
+
+ memset(&connE, 0x00, sizeof(connE));
+ connE.type = DCB_CONNECTOR_NONE;
+ i = -1;
+ } else {
+ i = outp->info.connector;
+ }
+
+ /* Check that we haven't already created this connector. */
+ list_for_each_entry(conn, &disp->conns, head) {
+ if (conn->index == outp->info.connector) {
+ outp->conn = conn;
+ break;
+ }
+ }
+
+ if (outp->conn)
+ continue;
+
+ /* Apparently we need to create a new one! */
+ ret = nvkm_conn_new(disp, i, &connE, &outp->conn);
+ if (ret) {
+ nvkm_error(subdev, "failed to create outp %d conn: %d\n", outp->index, ret);
+ nvkm_conn_del(&outp->conn);
+ list_del(&outp->head);
+ nvkm_outp_del(&outp);
+ continue;
+ }
+
+ list_add_tail(&outp->conn->head, &disp->conns);
+ }
+
+ /* Enforce identity-mapped SOR assignment for panels, which have
+ * certain bits (ie. backlight controls) wired to a specific SOR.
+ */
+ list_for_each_entry(outp, &disp->outps, head) {
+ if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
+ outp->conn->info.type == DCB_CONNECTOR_eDP) {
+ ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
+ if (!WARN_ON(!ior))
+ ior->identity = true;
+ outp->identity = true;
+ }
+ }
+
+ return 0;
}
static const struct nvkm_disp_func
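
The identity-SOR rule moves here intact: outp->info.or is a mask of ORs the VBIOS allows for the path, and ffs() picks the lowest set bit as the SOR the panel is pinned to. A worked illustration; the helper is hypothetical:

    static int
    example_identity_sor(u8 or_mask)
    {
        /* ffs() is 1-based: 0x1 -> SOR0, 0x2 -> SOR1, 0x4 -> SOR2. */
        return ffs(or_mask) - 1;
    }
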
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index 6094805fbd63..bfb2a4db8d64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -22,14 +22,16 @@
* Authors: Ben Skeggs
*/
#include "outp.h"
+#include "conn.h"
#include "dp.h"
#include "ior.h"
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
+#include <subdev/gpio.h>
#include <subdev/i2c.h>
-void
+static void
nvkm_outp_route(struct nvkm_disp *disp)
{
struct nvkm_outp *outp;
@@ -46,8 +48,8 @@ nvkm_outp_route(struct nvkm_disp *disp)
list_for_each_entry(ior, &disp->iors, head) {
if ((outp = ior->asy.outp)) {
- OUTP_DBG(outp, "acquire %s", ior->name);
if (ior->asy.outp != ior->arm.outp) {
+ OUTP_DBG(outp, "acquire %s", ior->name);
if (ior->func->route.set)
ior->func->route.set(outp, ior);
ior->arm.outp = ior->asy.outp;
@@ -87,22 +89,20 @@ nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type)
}
void
-nvkm_outp_release(struct nvkm_outp *outp, u8 user)
+nvkm_outp_release_or(struct nvkm_outp *outp, u8 user)
{
struct nvkm_ior *ior = outp->ior;
OUTP_TRACE(outp, "release %02x &= %02x %p", outp->acquired, ~user, ior);
if (ior) {
outp->acquired &= ~user;
if (!outp->acquired) {
- if (outp->func->release && outp->ior)
- outp->func->release(outp);
outp->ior->asy.outp = NULL;
outp->ior = NULL;
}
}
}
-static inline int
+int
nvkm_outp_acquire_ior(struct nvkm_outp *outp, u8 user, struct nvkm_ior *ior)
{
outp->ior = ior;
@@ -140,7 +140,7 @@ nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type,
}
int
-nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
+nvkm_outp_acquire_or(struct nvkm_outp *outp, u8 user, bool hda)
{
struct nvkm_ior *ior = outp->ior;
enum nvkm_ior_proto proto;
@@ -207,39 +207,110 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
return nvkm_outp_acquire_hda(outp, type, user, false);
}
+int
+nvkm_outp_bl_set(struct nvkm_outp *outp, int level)
+{
+ int ret;
+
+ ret = nvkm_outp_acquire_or(outp, NVKM_OUTP_PRIV, false);
+ if (ret)
+ return ret;
+
+ if (outp->ior->func->bl)
+ ret = outp->ior->func->bl->set(outp->ior, level);
+ else
+ ret = -EINVAL;
+
+ nvkm_outp_release_or(outp, NVKM_OUTP_PRIV);
+ return ret;
+}
+
+int
+nvkm_outp_bl_get(struct nvkm_outp *outp)
+{
+ int ret;
+
+ ret = nvkm_outp_acquire_or(outp, NVKM_OUTP_PRIV, false);
+ if (ret)
+ return ret;
+
+ if (outp->ior->func->bl)
+ ret = outp->ior->func->bl->get(outp->ior);
+ else
+ ret = -EINVAL;
+
+ nvkm_outp_release_or(outp, NVKM_OUTP_PRIV);
+ return ret;
+}
+
+int
+nvkm_outp_detect(struct nvkm_outp *outp)
+{
+ struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
+ int ret = -EINVAL;
+
+ if (outp->conn->info.hpd != DCB_GPIO_UNUSED) {
+ ret = nvkm_gpio_get(gpio, 0, DCB_GPIO_UNUSED, outp->conn->info.hpd);
+ if (ret < 0)
+ return ret;
+ if (ret)
+ return 1;
+
+ /*TODO: Look into returning NOT_PRESENT if !HPD on DVI/HDMI.
+ *
+ * It's uncertain whether this is accurate for all older chipsets,
+ * so we're returning UNKNOWN, and the DRM will probe DDC instead.
+ */
+ if (outp->info.type == DCB_OUTPUT_DP)
+ return 0;
+ }
+
+ return ret;
+}
+
void
-nvkm_outp_fini(struct nvkm_outp *outp)
+nvkm_outp_release(struct nvkm_outp *outp)
{
- if (outp->func->fini)
- outp->func->fini(outp);
+ nvkm_outp_release_or(outp, NVKM_OUTP_USER);
+ nvkm_outp_route(outp->disp);
}
-static void
-nvkm_outp_init_route(struct nvkm_outp *outp)
+int
+nvkm_outp_acquire(struct nvkm_outp *outp, bool hda)
+{
+ int ret = nvkm_outp_acquire_or(outp, NVKM_OUTP_USER, hda);
+
+ if (ret)
+ return ret;
+
+ nvkm_outp_route(outp->disp);
+ return 0;
+}
+
+struct nvkm_ior *
+nvkm_outp_inherit(struct nvkm_outp *outp)
{
struct nvkm_disp *disp = outp->disp;
+ struct nvkm_ior *ior;
enum nvkm_ior_proto proto;
enum nvkm_ior_type type;
- struct nvkm_ior *ior;
int id, link;
/* Find any OR from the class that is able to support this device. */
proto = nvkm_outp_xlat(outp, &type);
if (proto == UNKNOWN)
- return;
+ return NULL;
ior = nvkm_ior_find(disp, type, -1);
- if (!ior) {
- WARN_ON(1);
- return;
- }
+ if (WARN_ON(!ior))
+ return NULL;
/* Determine the specific OR, if any, this device is attached to. */
if (ior->func->route.get) {
id = ior->func->route.get(outp, &link);
if (id < 0) {
OUTP_DBG(outp, "no route");
- return;
+ return NULL;
}
} else {
/* Prior to DCB 4.1, this is hardwired like so. */
@@ -248,10 +319,24 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
}
ior = nvkm_ior_find(disp, type, id);
- if (!ior) {
- WARN_ON(1);
+ if (WARN_ON(!ior))
+ return NULL;
+
+ return ior;
+}
+
+void
+nvkm_outp_init(struct nvkm_outp *outp)
+{
+ enum nvkm_ior_proto proto;
+ enum nvkm_ior_type type;
+ struct nvkm_ior *ior;
+
+ /* Find any OR from the class that is able to support this device. */
+ proto = nvkm_outp_xlat(outp, &type);
+ ior = outp->func->inherit(outp);
+ if (!ior)
return;
- }
/* Determine if the OR is already configured for this device. */
ior->func->state(ior, &ior->arm);
@@ -274,14 +359,6 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
}
void
-nvkm_outp_init(struct nvkm_outp *outp)
-{
- nvkm_outp_init_route(outp);
- if (outp->func->init)
- outp->func->init(outp);
-}
-
-void
nvkm_outp_del(struct nvkm_outp **poutp)
{
struct nvkm_outp *outp = *poutp;
@@ -328,6 +405,13 @@ nvkm_outp_new_(const struct nvkm_outp_func *func, struct nvkm_disp *disp,
static const struct nvkm_outp_func
nvkm_outp = {
+ .init = nvkm_outp_init,
+ .detect = nvkm_outp_detect,
+ .inherit = nvkm_outp_inherit,
+ .acquire = nvkm_outp_acquire,
+ .release = nvkm_outp_release,
+ .bl.get = nvkm_outp_bl_get,
+ .bl.set = nvkm_outp_bl_set,
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index 4e7f873f66e2..ebd2f499b4b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -35,6 +35,8 @@ struct nvkm_outp {
struct {
struct nvbios_dpout info;
u8 version;
+ bool mst;
+ bool increased_wm;
struct nvkm_i2c_aux *aux;
@@ -50,14 +52,13 @@ struct nvkm_outp {
u32 rate;
} rate[8];
int rates;
- int links;
struct mutex mutex;
struct {
- atomic_t done;
u8 nr;
u8 bw;
bool mst;
+ bool post_adj;
} lt;
} dp;
};
@@ -74,17 +75,45 @@ int nvkm_outp_new(struct nvkm_disp *, int index, struct dcb_output *, struct nvk
void nvkm_outp_del(struct nvkm_outp **);
void nvkm_outp_init(struct nvkm_outp *);
void nvkm_outp_fini(struct nvkm_outp *);
-int nvkm_outp_acquire(struct nvkm_outp *, u8 user, bool hda);
-void nvkm_outp_release(struct nvkm_outp *, u8 user);
-void nvkm_outp_route(struct nvkm_disp *);
+
+int nvkm_outp_detect(struct nvkm_outp *);
+
+struct nvkm_ior *nvkm_outp_inherit(struct nvkm_outp *);
+int nvkm_outp_acquire(struct nvkm_outp *, bool hda);
+int nvkm_outp_acquire_or(struct nvkm_outp *, u8 user, bool hda);
+int nvkm_outp_acquire_ior(struct nvkm_outp *, u8 user, struct nvkm_ior *);
+void nvkm_outp_release(struct nvkm_outp *);
+void nvkm_outp_release_or(struct nvkm_outp *, u8 user);
+
+int nvkm_outp_bl_get(struct nvkm_outp *);
+int nvkm_outp_bl_set(struct nvkm_outp *, int level);
struct nvkm_outp_func {
void *(*dtor)(struct nvkm_outp *);
void (*init)(struct nvkm_outp *);
void (*fini)(struct nvkm_outp *);
- int (*acquire)(struct nvkm_outp *);
+
+ int (*detect)(struct nvkm_outp *);
+ int (*edid_get)(struct nvkm_outp *, u8 *data, u16 *size);
+
+ struct nvkm_ior *(*inherit)(struct nvkm_outp *);
+ int (*acquire)(struct nvkm_outp *, bool hda);
void (*release)(struct nvkm_outp *);
- void (*disable)(struct nvkm_outp *, struct nvkm_ior *);
+
+ struct {
+ int (*get)(struct nvkm_outp *);
+ int (*set)(struct nvkm_outp *, int level);
+ } bl;
+
+ struct {
+ int (*aux_pwr)(struct nvkm_outp *, bool pu);
+ int (*aux_xfer)(struct nvkm_outp *, u8 type, u32 addr, u8 *data, u8 *size);
+ int (*rates)(struct nvkm_outp *);
+ int (*train)(struct nvkm_outp *, bool retrain);
+ int (*drive)(struct nvkm_outp *, u8 lanes, u8 pe[4], u8 vs[4]);
+ int (*mst_id_get)(struct nvkm_outp *, u32 *id);
+ int (*mst_id_put)(struct nvkm_outp *, u32 id);
+ } dp;
};
#define OUTP_MSG(o,l,f,a...) do { \
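For orientation, the expanded function table groups the optional hooks by feature: the top-level detect/EDID/inherit/acquire hooks plus nested bl and dp groups. A backend that supports DP and backlight control would fill only the groups it implements, along the lines of the hypothetical sketch below (the example_dp_* names are placeholders, not functions added by this patch; the generic nvkm_outp_* helpers and the struct layout come from the hunks above):

static const struct nvkm_outp_func
example_sor_dp = {
	.init    = nvkm_outp_init,
	.detect  = nvkm_outp_detect,	/* generic helpers from outp.c */
	.inherit = nvkm_outp_inherit,
	.acquire = nvkm_outp_acquire,
	.release = nvkm_outp_release,
	.bl      = { .get = nvkm_outp_bl_get, .set = nvkm_outp_bl_set },
	.dp      = {
		.aux_pwr  = example_dp_aux_pwr,		/* placeholder hook */
		.aux_xfer = example_dp_aux_xfer,	/* placeholder hook */
		.train    = example_dp_train,		/* placeholder hook */
		.drive    = example_dp_drive,		/* placeholder hook */
	},
};

Callers treat an unset hook as "not supported": the uoutp handlers later in this diff return -EINVAL when, for example, outp->func->dp.train is NULL.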
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
index f5242a672279..19f5d3a6035e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/tu102.c
@@ -88,6 +88,7 @@ tu102_sor = {
.state = gv100_sor_state,
.power = nv50_sor_power,
.clock = gf119_sor_clock,
+ .bl = &gt215_sor_bl,
.hdmi = &gv100_sor_hdmi,
.dp = &tu102_sor_dp,
.hda = &gv100_sor_hda,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
index 46b057fe1412..ff82bb248492 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uconn.c
@@ -109,46 +109,6 @@ nvkm_uconn_uevent(struct nvkm_object *object, void *argv, u32 argc, struct nvkm_
nvkm_uconn_uevent_gpio);
}
-static int
-nvkm_uconn_mthd_hpd_status(struct nvkm_conn *conn, void *argv, u32 argc)
-{
- struct nvkm_gpio *gpio = conn->disp->engine.subdev.device->gpio;
- union nvif_conn_hpd_status_args *args = argv;
-
- if (argc != sizeof(args->v0) || args->v0.version != 0)
- return -ENOSYS;
-
- args->v0.support = gpio && conn->info.hpd != DCB_GPIO_UNUSED;
- args->v0.present = 0;
-
- if (args->v0.support) {
- int ret = nvkm_gpio_get(gpio, 0, DCB_GPIO_UNUSED, conn->info.hpd);
-
- if (WARN_ON(ret < 0)) {
- args->v0.support = false;
- return 0;
- }
-
- args->v0.present = ret;
- }
-
- return 0;
-}
-
-static int
-nvkm_uconn_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
-{
- struct nvkm_conn *conn = nvkm_uconn(object);
-
- switch (mthd) {
- case NVIF_CONN_V0_HPD_STATUS: return nvkm_uconn_mthd_hpd_status(conn, argv, argc);
- default:
- break;
- }
-
- return -EINVAL;
-}
-
static void *
nvkm_uconn_dtor(struct nvkm_object *object)
{
@@ -164,7 +124,6 @@ nvkm_uconn_dtor(struct nvkm_object *object)
static const struct nvkm_object_func
nvkm_uconn = {
.dtor = nvkm_uconn_dtor,
- .mthd = nvkm_uconn_mthd,
.uevent = nvkm_uconn_uevent,
};
@@ -192,6 +151,32 @@ nvkm_uconn_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nv
ret = -EBUSY;
spin_lock(&disp->client.lock);
if (!conn->object.func) {
+ switch (conn->info.type) {
+ case DCB_CONNECTOR_VGA : args->v0.type = NVIF_CONN_V0_VGA; break;
+ case DCB_CONNECTOR_TV_0 :
+ case DCB_CONNECTOR_TV_1 :
+ case DCB_CONNECTOR_TV_3 : args->v0.type = NVIF_CONN_V0_TV; break;
+ case DCB_CONNECTOR_DMS59_0 :
+ case DCB_CONNECTOR_DMS59_1 :
+ case DCB_CONNECTOR_DVI_I : args->v0.type = NVIF_CONN_V0_DVI_I; break;
+ case DCB_CONNECTOR_DVI_D : args->v0.type = NVIF_CONN_V0_DVI_D; break;
+ case DCB_CONNECTOR_LVDS : args->v0.type = NVIF_CONN_V0_LVDS; break;
+ case DCB_CONNECTOR_LVDS_SPWG: args->v0.type = NVIF_CONN_V0_LVDS_SPWG; break;
+ case DCB_CONNECTOR_DMS59_DP0:
+ case DCB_CONNECTOR_DMS59_DP1:
+ case DCB_CONNECTOR_DP :
+ case DCB_CONNECTOR_mDP :
+ case DCB_CONNECTOR_USB_C : args->v0.type = NVIF_CONN_V0_DP; break;
+ case DCB_CONNECTOR_eDP : args->v0.type = NVIF_CONN_V0_EDP; break;
+ case DCB_CONNECTOR_HDMI_0 :
+ case DCB_CONNECTOR_HDMI_1 :
+ case DCB_CONNECTOR_HDMI_C : args->v0.type = NVIF_CONN_V0_HDMI; break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ break;
+ }
+
nvkm_object_ctor(&nvkm_uconn, oclass, &conn->object);
*pobject = &conn->object;
ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
index fc283a4a1522..e4279f1772a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/uoutp.c
@@ -25,6 +25,8 @@
#include "head.h"
#include "ior.h"
+#include <subdev/i2c.h>
+
#include <nvif/if0012.h>
static int
@@ -44,17 +46,121 @@ nvkm_uoutp_mthd_dp_mst_vcpi(struct nvkm_outp *outp, void *argv, u32 argc)
}
static int
-nvkm_uoutp_mthd_dp_retrain(struct nvkm_outp *outp, void *argv, u32 argc)
+nvkm_uoutp_mthd_dp_mst_id_put(struct nvkm_outp *outp, void *argv, u32 argc)
{
- union nvif_outp_dp_retrain_args *args = argv;
+ union nvif_outp_dp_mst_id_put_args *args = argv;
- if (argc != sizeof(args->vn))
+ if (argc != sizeof(args->v0) || args->v0.version != 0)
+ return -ENOSYS;
+ if (!outp->func->dp.mst_id_put)
+ return -EINVAL;
+
+ return outp->func->dp.mst_id_put(outp, args->v0.id);
+}
+
+static int
+nvkm_uoutp_mthd_dp_mst_id_get(struct nvkm_outp *outp, void *argv, u32 argc)
+{
+ union nvif_outp_dp_mst_id_get_args *args = argv;
+
+ if (argc != sizeof(args->v0) || args->v0.version != 0)
+ return -ENOSYS;
+ if (!outp->func->dp.mst_id_get)
+ return -EINVAL;
+
+ return outp->func->dp.mst_id_get(outp, &args->v0.id);
+}
+
+static int
+nvkm_uoutp_mthd_dp_sst(struct nvkm_outp *outp, void *argv, u32 argc)
+{
+ union nvif_outp_dp_sst_args *args = argv;
+ struct nvkm_disp *disp = outp->disp;
+ struct nvkm_ior *ior = outp->ior;
+
+ if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
- if (!atomic_read(&outp->dp.lt.done))
+ if (!ior->func->dp || !nvkm_head_find(disp, args->v0.head))
+ return -EINVAL;
+ if (!ior->func->dp->sst)
return 0;
- return outp->func->acquire(outp);
+ return ior->func->dp->sst(ior, args->v0.head,
+ outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP,
+ args->v0.watermark, args->v0.hblanksym, args->v0.vblanksym);
+}
+
+static int
+nvkm_uoutp_mthd_dp_drive(struct nvkm_outp *outp, void *argv, u32 argc)
+{
+ union nvif_outp_dp_drive_args *args = argv;
+
+ if (argc != sizeof(args->v0) || args->v0.version != 0)
+ return -ENOSYS;
+ if (!outp->func->dp.drive)
+ return -EINVAL;
+
+ return outp->func->dp.drive(outp, args->v0.lanes, args->v0.pe, args->v0.vs);
+}
+
+static int
+nvkm_uoutp_mthd_dp_train(struct nvkm_outp *outp, void *argv, u32 argc)
+{
+ union nvif_outp_dp_train_args *args = argv;
+
+ if (argc != sizeof(args->v0) || args->v0.version != 0)
+ return -ENOSYS;
+ if (!outp->func->dp.train)
+ return -EINVAL;
+
+ if (!args->v0.retrain) {
+ memcpy(outp->dp.dpcd, args->v0.dpcd, sizeof(outp->dp.dpcd));
+ outp->dp.lttprs = args->v0.lttprs;
+ outp->dp.lt.nr = args->v0.link_nr;
+ outp->dp.lt.bw = args->v0.link_bw / 27000;
+ outp->dp.lt.mst = args->v0.mst;
+ outp->dp.lt.post_adj = args->v0.post_lt_adj;
+ }
+
+ return outp->func->dp.train(outp, args->v0.retrain);
+}
+
+static int
+nvkm_uoutp_mthd_dp_rates(struct nvkm_outp *outp, void *argv, u32 argc)
+{
+ union nvif_outp_dp_rates_args *args = argv;
+
+ if (argc != sizeof(args->v0) || args->v0.version != 0)
+ return -ENOSYS;
+ if (args->v0.rates > ARRAY_SIZE(outp->dp.rate))
+ return -EINVAL;
+
+ for (int i = 0; i < args->v0.rates; i++) {
+ outp->dp.rate[i].dpcd = args->v0.rate[i].dpcd;
+ outp->dp.rate[i].rate = args->v0.rate[i].rate;
+ }
+
+ outp->dp.rates = args->v0.rates;
+
+ if (outp->func->dp.rates)
+ outp->func->dp.rates(outp);
+
+ return 0;
+}
+
+static int
+nvkm_uoutp_mthd_dp_aux_xfer(struct nvkm_outp *outp, void *argv, u32 argc)
+{
+ union nvif_outp_dp_aux_xfer_args *args = argv;
+
+ if (argc != sizeof(args->v0) || args->v0.version != 0)
+ return -ENOSYS;
+ if (!outp->func->dp.aux_xfer)
+ return -EINVAL;
+
+ return outp->func->dp.aux_xfer(outp, args->v0.type, args->v0.addr,
+ args->v0.data, &args->v0.size);
}
static int
@@ -64,10 +170,10 @@ nvkm_uoutp_mthd_dp_aux_pwr(struct nvkm_outp *outp, void *argv, u32 argc)
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
+ if (!outp->func->dp.aux_pwr)
+ return -EINVAL;
- outp->dp.enabled = !!args->v0.state;
- nvkm_dp_enable(outp, outp->dp.enabled);
- return 0;
+ return outp->func->dp.aux_pwr(outp, !!args->v0.state);
}
static int
@@ -88,12 +194,20 @@ nvkm_uoutp_mthd_hda_eld(struct nvkm_outp *outp, void *argv, u32 argc)
if (argc && args->v0.data[0]) {
if (outp->info.type == DCB_OUTPUT_DP)
ior->func->dp->audio(ior, args->v0.head, true);
+ else
+ if (ior->func->hdmi->audio)
+ ior->func->hdmi->audio(ior, args->v0.head, true);
+
ior->func->hda->hpd(ior, args->v0.head, true);
ior->func->hda->eld(ior, args->v0.head, args->v0.data, argc);
} else {
+ ior->func->hda->hpd(ior, args->v0.head, false);
+
if (outp->info.type == DCB_OUTPUT_DP)
ior->func->dp->audio(ior, args->v0.head, false);
- ior->func->hda->hpd(ior, args->v0.head, false);
+ else
+ if (ior->func->hdmi->audio)
+ ior->func->hdmi->audio(ior, args->v0.head, false);
}
return 0;
@@ -126,84 +240,105 @@ nvkm_uoutp_mthd_infoframe(struct nvkm_outp *outp, void *argv, u32 argc)
}
static int
-nvkm_uoutp_mthd_release(struct nvkm_outp *outp, void *argv, u32 argc)
+nvkm_uoutp_mthd_hdmi(struct nvkm_outp *outp, void *argv, u32 argc)
{
- struct nvkm_head *head = outp->asy.head;
+ union nvif_outp_hdmi_args *args = argv;
struct nvkm_ior *ior = outp->ior;
- union nvif_outp_release_args *args = argv;
- if (argc != sizeof(args->vn))
+ if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
- if (ior->func->hdmi && head) {
- ior->func->hdmi->infoframe_avi(ior, head->id, NULL, 0);
- ior->func->hdmi->infoframe_vsi(ior, head->id, NULL, 0);
- ior->func->hdmi->ctrl(ior, head->id, false, 0, 0);
+ if (!(outp->asy.head = nvkm_head_find(outp->disp, args->v0.head)))
+ return -EINVAL;
+
+ if (!ior->func->hdmi ||
+ args->v0.max_ac_packet > 0x1f ||
+ args->v0.rekey > 0x7f ||
+ (args->v0.scdc && !ior->func->hdmi->scdc))
+ return -EINVAL;
+
+ if (!args->v0.enable) {
+ ior->func->hdmi->infoframe_avi(ior, args->v0.head, NULL, 0);
+ ior->func->hdmi->infoframe_vsi(ior, args->v0.head, NULL, 0);
+ ior->func->hdmi->ctrl(ior, args->v0.head, false, 0, 0);
+ return 0;
}
- nvkm_outp_release(outp, NVKM_OUTP_USER);
+ ior->func->hdmi->ctrl(ior, args->v0.head, args->v0.enable,
+ args->v0.max_ac_packet, args->v0.rekey);
+ if (ior->func->hdmi->scdc)
+ ior->func->hdmi->scdc(ior, args->v0.khz, args->v0.scdc, args->v0.scdc_scrambling,
+ args->v0.scdc_low_rates);
+
return 0;
}
static int
-nvkm_uoutp_mthd_acquire_dp(struct nvkm_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE],
- u8 link_nr, u8 link_bw, bool hda, bool mst)
+nvkm_uoutp_mthd_lvds(struct nvkm_outp *outp, void *argv, u32 argc)
{
- int ret;
+ union nvif_outp_lvds_args *args = argv;
- ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER, hda);
- if (ret)
- return ret;
+ if (argc != sizeof(args->v0) || args->v0.version != 0)
+ return -ENOSYS;
+ if (outp->info.type != DCB_OUTPUT_LVDS)
+ return -EINVAL;
- memcpy(outp->dp.dpcd, dpcd, sizeof(outp->dp.dpcd));
- outp->dp.lt.nr = link_nr;
- outp->dp.lt.bw = link_bw;
- outp->dp.lt.mst = mst;
+ outp->lvds.dual = !!args->v0.dual;
+ outp->lvds.bpc8 = !!args->v0.bpc8;
return 0;
}
static int
-nvkm_uoutp_mthd_acquire_tmds(struct nvkm_outp *outp, u8 head, u8 hdmi, u8 hdmi_max_ac_packet,
- u8 hdmi_rekey, u8 hdmi_scdc, u8 hdmi_hda)
+nvkm_uoutp_mthd_bl_set(struct nvkm_outp *outp, void *argv, u32 argc)
{
- struct nvkm_ior *ior;
+ union nvif_outp_bl_get_args *args = argv;
int ret;
- if (!(outp->asy.head = nvkm_head_find(outp->disp, head)))
- return -EINVAL;
+ if (argc != sizeof(args->v0) || args->v0.version != 0)
+ return -ENOSYS;
- ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER, hdmi && hdmi_hda);
- if (ret)
- return ret;
+ if (outp->func->bl.set)
+ ret = outp->func->bl.set(outp, args->v0.level);
+ else
+ ret = -EINVAL;
- ior = outp->ior;
+ return ret;
+}
- if (hdmi) {
- if (!ior->func->hdmi ||
- hdmi_max_ac_packet > 0x1f || hdmi_rekey > 0x7f ||
- (hdmi_scdc && !ior->func->hdmi->scdc)) {
- nvkm_outp_release(outp, NVKM_OUTP_USER);
- return -EINVAL;
- }
+static int
+nvkm_uoutp_mthd_bl_get(struct nvkm_outp *outp, void *argv, u32 argc)
+{
+ union nvif_outp_bl_get_args *args = argv;
+ int ret;
+
+ if (argc != sizeof(args->v0) || args->v0.version != 0)
+ return -ENOSYS;
- ior->func->hdmi->ctrl(ior, head, hdmi, hdmi_max_ac_packet, hdmi_rekey);
- if (ior->func->hdmi->scdc)
- ior->func->hdmi->scdc(ior, hdmi_scdc);
+ if (outp->func->bl.get) {
+ ret = outp->func->bl.get(outp);
+ if (ret >= 0) {
+ args->v0.level = ret;
+ ret = 0;
+ }
+ } else {
+ ret = -EINVAL;
}
- return 0;
+ return ret;
}
static int
-nvkm_uoutp_mthd_acquire_lvds(struct nvkm_outp *outp, bool dual, bool bpc8)
+nvkm_uoutp_mthd_release(struct nvkm_outp *outp, void *argv, u32 argc)
{
- if (outp->info.type != DCB_OUTPUT_LVDS)
- return -EINVAL;
+ union nvif_outp_release_args *args = argv;
- outp->lvds.dual = dual;
- outp->lvds.bpc8 = bpc8;
+ if (argc != sizeof(args->vn))
+ return -ENOSYS;
+ if (!outp->ior)
+ return -EINVAL;
- return nvkm_outp_acquire(outp, NVKM_OUTP_USER, false);
+ outp->func->release(outp);
+ return 0;
}
static int
@@ -214,30 +349,16 @@ nvkm_uoutp_mthd_acquire(struct nvkm_outp *outp, void *argv, u32 argc)
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
- if (outp->ior)
+ if (outp->ior && args->v0.type <= NVIF_OUTP_ACQUIRE_V0_PIOR)
return -EBUSY;
- switch (args->v0.proto) {
- case NVIF_OUTP_ACQUIRE_V0_RGB_CRT:
- ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER, false);
+ switch (args->v0.type) {
+ case NVIF_OUTP_ACQUIRE_V0_DAC:
+ case NVIF_OUTP_ACQUIRE_V0_PIOR:
+ ret = outp->func->acquire(outp, false);
break;
- case NVIF_OUTP_ACQUIRE_V0_TMDS:
- ret = nvkm_uoutp_mthd_acquire_tmds(outp, args->v0.tmds.head,
- args->v0.tmds.hdmi,
- args->v0.tmds.hdmi_max_ac_packet,
- args->v0.tmds.hdmi_rekey,
- args->v0.tmds.hdmi_scdc,
- args->v0.tmds.hdmi_hda);
- break;
- case NVIF_OUTP_ACQUIRE_V0_LVDS:
- ret = nvkm_uoutp_mthd_acquire_lvds(outp, args->v0.lvds.dual, args->v0.lvds.bpc8);
- break;
- case NVIF_OUTP_ACQUIRE_V0_DP:
- ret = nvkm_uoutp_mthd_acquire_dp(outp, args->v0.dp.dpcd,
- args->v0.dp.link_nr,
- args->v0.dp.link_bw,
- args->v0.dp.hda != 0,
- args->v0.dp.mst != 0);
+ case NVIF_OUTP_ACQUIRE_V0_SOR:
+ ret = outp->func->acquire(outp, args->v0.sor.hda);
break;
default:
ret = -EINVAL;
@@ -253,6 +374,69 @@ nvkm_uoutp_mthd_acquire(struct nvkm_outp *outp, void *argv, u32 argc)
}
static int
+nvkm_uoutp_mthd_inherit(struct nvkm_outp *outp, void *argv, u32 argc)
+{
+ union nvif_outp_inherit_args *args = argv;
+ struct nvkm_ior *ior;
+ int ret = 0;
+
+ if (argc != sizeof(args->v0) || args->v0.version != 0)
+ return -ENOSYS;
+
+ /* Ensure an ior is hooked up to this outp already */
+ ior = outp->func->inherit(outp);
+ if (!ior)
+ return -ENODEV;
+
+ /* With iors, there will be a separate output path for each type of connector - and all of
+ * them will appear to be hooked up. Figure out which one is actually the one we're using
+ * based on the protocol we were given over nvif
+ */
+ switch (args->v0.proto) {
+ case NVIF_OUTP_INHERIT_V0_TMDS:
+ if (ior->arm.proto != TMDS)
+ return -ENODEV;
+ break;
+ case NVIF_OUTP_INHERIT_V0_DP:
+ if (ior->arm.proto != DP)
+ return -ENODEV;
+ break;
+ case NVIF_OUTP_INHERIT_V0_LVDS:
+ if (ior->arm.proto != LVDS)
+ return -ENODEV;
+ break;
+ case NVIF_OUTP_INHERIT_V0_TV:
+ if (ior->arm.proto != TV)
+ return -ENODEV;
+ break;
+ case NVIF_OUTP_INHERIT_V0_RGB_CRT:
+ if (ior->arm.proto != CRT)
+ return -ENODEV;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ /* Make sure that userspace hasn't already acquired this */
+ if (outp->acquired) {
+ OUTP_ERR(outp, "cannot inherit an already acquired (%02x) outp", outp->acquired);
+ return -EBUSY;
+ }
+
+ /* Mark the outp acquired by userspace now that we've confirmed it's already active */
+ OUTP_TRACE(outp, "inherit %02x |= %02x %p", outp->acquired, NVKM_OUTP_USER, ior);
+ nvkm_outp_acquire_ior(outp, NVKM_OUTP_USER, ior);
+
+ args->v0.or = ior->id;
+ args->v0.link = ior->arm.link;
+ args->v0.head = ffs(ior->arm.head) - 1;
+ args->v0.proto = ior->arm.proto_evo;
+
+ return ret;
+}
+
+static int
nvkm_uoutp_mthd_load_detect(struct nvkm_outp *outp, void *argv, u32 argc)
{
union nvif_outp_load_detect_args *args = argv;
@@ -261,7 +445,7 @@ nvkm_uoutp_mthd_load_detect(struct nvkm_outp *outp, void *argv, u32 argc)
if (argc != sizeof(args->v0) || args->v0.version != 0)
return -ENOSYS;
- ret = nvkm_outp_acquire(outp, NVKM_OUTP_PRIV, false);
+ ret = nvkm_outp_acquire_or(outp, NVKM_OUTP_PRIV, false);
if (ret == 0) {
if (outp->ior->func->sense) {
ret = outp->ior->func->sense(outp->ior, args->v0.data);
@@ -269,21 +453,64 @@ nvkm_uoutp_mthd_load_detect(struct nvkm_outp *outp, void *argv, u32 argc)
} else {
ret = -EINVAL;
}
- nvkm_outp_release(outp, NVKM_OUTP_PRIV);
+ nvkm_outp_release_or(outp, NVKM_OUTP_PRIV);
}
return ret;
}
static int
+nvkm_uoutp_mthd_edid_get(struct nvkm_outp *outp, void *argv, u32 argc)
+{
+ union nvif_outp_edid_get_args *args = argv;
+
+ if (argc != sizeof(args->v0) || args->v0.version != 0)
+ return -ENOSYS;
+ if (!outp->func->edid_get)
+ return -EINVAL;
+
+ args->v0.size = ARRAY_SIZE(args->v0.data);
+ return outp->func->edid_get(outp, args->v0.data, &args->v0.size);
+}
+
+static int
+nvkm_uoutp_mthd_detect(struct nvkm_outp *outp, void *argv, u32 argc)
+{
+ union nvif_outp_detect_args *args = argv;
+ int ret;
+
+ if (argc != sizeof(args->v0) || args->v0.version != 0)
+ return -ENOSYS;
+ if (!outp->func->detect)
+ return -EINVAL;
+
+ ret = outp->func->detect(outp);
+ switch (ret) {
+ case 0: args->v0.status = NVIF_OUTP_DETECT_V0_NOT_PRESENT; break;
+ case 1: args->v0.status = NVIF_OUTP_DETECT_V0_PRESENT; break;
+ default:
+ args->v0.status = NVIF_OUTP_DETECT_V0_UNKNOWN;
+ break;
+ }
+
+ return 0;
+}
+
+static int
nvkm_uoutp_mthd_acquired(struct nvkm_outp *outp, u32 mthd, void *argv, u32 argc)
{
switch (mthd) {
- case NVIF_OUTP_V0_RELEASE : return nvkm_uoutp_mthd_release (outp, argv, argc);
- case NVIF_OUTP_V0_INFOFRAME : return nvkm_uoutp_mthd_infoframe (outp, argv, argc);
- case NVIF_OUTP_V0_HDA_ELD : return nvkm_uoutp_mthd_hda_eld (outp, argv, argc);
- case NVIF_OUTP_V0_DP_RETRAIN : return nvkm_uoutp_mthd_dp_retrain (outp, argv, argc);
- case NVIF_OUTP_V0_DP_MST_VCPI: return nvkm_uoutp_mthd_dp_mst_vcpi(outp, argv, argc);
+ case NVIF_OUTP_V0_RELEASE : return nvkm_uoutp_mthd_release (outp, argv, argc);
+ case NVIF_OUTP_V0_LVDS : return nvkm_uoutp_mthd_lvds (outp, argv, argc);
+ case NVIF_OUTP_V0_HDMI : return nvkm_uoutp_mthd_hdmi (outp, argv, argc);
+ case NVIF_OUTP_V0_INFOFRAME : return nvkm_uoutp_mthd_infoframe (outp, argv, argc);
+ case NVIF_OUTP_V0_HDA_ELD : return nvkm_uoutp_mthd_hda_eld (outp, argv, argc);
+ case NVIF_OUTP_V0_DP_TRAIN : return nvkm_uoutp_mthd_dp_train (outp, argv, argc);
+ case NVIF_OUTP_V0_DP_DRIVE : return nvkm_uoutp_mthd_dp_drive (outp, argv, argc);
+ case NVIF_OUTP_V0_DP_SST : return nvkm_uoutp_mthd_dp_sst (outp, argv, argc);
+ case NVIF_OUTP_V0_DP_MST_ID_GET: return nvkm_uoutp_mthd_dp_mst_id_get(outp, argv, argc);
+ case NVIF_OUTP_V0_DP_MST_ID_PUT: return nvkm_uoutp_mthd_dp_mst_id_put(outp, argv, argc);
+ case NVIF_OUTP_V0_DP_MST_VCPI : return nvkm_uoutp_mthd_dp_mst_vcpi (outp, argv, argc);
default:
break;
}
@@ -292,17 +519,25 @@ nvkm_uoutp_mthd_acquired(struct nvkm_outp *outp, u32 mthd, void *argv, u32 argc)
}
static int
-nvkm_uoutp_mthd_noacquire(struct nvkm_outp *outp, u32 mthd, void *argv, u32 argc)
+nvkm_uoutp_mthd_noacquire(struct nvkm_outp *outp, u32 mthd, void *argv, u32 argc, bool *invalid)
{
switch (mthd) {
- case NVIF_OUTP_V0_LOAD_DETECT: return nvkm_uoutp_mthd_load_detect(outp, argv, argc);
+ case NVIF_OUTP_V0_DETECT : return nvkm_uoutp_mthd_detect (outp, argv, argc);
+ case NVIF_OUTP_V0_EDID_GET : return nvkm_uoutp_mthd_edid_get (outp, argv, argc);
+ case NVIF_OUTP_V0_INHERIT : return nvkm_uoutp_mthd_inherit (outp, argv, argc);
case NVIF_OUTP_V0_ACQUIRE : return nvkm_uoutp_mthd_acquire (outp, argv, argc);
+ case NVIF_OUTP_V0_LOAD_DETECT: return nvkm_uoutp_mthd_load_detect(outp, argv, argc);
+ case NVIF_OUTP_V0_BL_GET : return nvkm_uoutp_mthd_bl_get (outp, argv, argc);
+ case NVIF_OUTP_V0_BL_SET : return nvkm_uoutp_mthd_bl_set (outp, argv, argc);
case NVIF_OUTP_V0_DP_AUX_PWR : return nvkm_uoutp_mthd_dp_aux_pwr (outp, argv, argc);
+ case NVIF_OUTP_V0_DP_AUX_XFER: return nvkm_uoutp_mthd_dp_aux_xfer(outp, argv, argc);
+ case NVIF_OUTP_V0_DP_RATES : return nvkm_uoutp_mthd_dp_rates (outp, argv, argc);
default:
break;
}
- return 1;
+ *invalid = true;
+ return 0;
}
static int
@@ -310,12 +545,13 @@ nvkm_uoutp_mthd(struct nvkm_object *object, u32 mthd, void *argv, u32 argc)
{
struct nvkm_outp *outp = nvkm_uoutp(object);
struct nvkm_disp *disp = outp->disp;
+ bool invalid = false;
int ret;
mutex_lock(&disp->super.mutex);
- ret = nvkm_uoutp_mthd_noacquire(outp, mthd, argv, argc);
- if (ret <= 0)
+ ret = nvkm_uoutp_mthd_noacquire(outp, mthd, argv, argc, &invalid);
+ if (!invalid)
goto done;
if (outp->ior)
@@ -370,10 +606,60 @@ nvkm_uoutp_new(const struct nvkm_oclass *oclass, void *argv, u32 argc, struct nv
ret = -EBUSY;
spin_lock(&disp->client.lock);
if (!outp->object.func) {
+ switch (outp->info.type) {
+ case DCB_OUTPUT_ANALOG:
+ args->v0.type = NVIF_OUTP_V0_TYPE_DAC;
+ args->v0.proto = NVIF_OUTP_V0_PROTO_RGB_CRT;
+ args->v0.rgb_crt.freq_max = outp->info.crtconf.maxfreq;
+ break;
+ case DCB_OUTPUT_TMDS:
+ if (!outp->info.location) {
+ args->v0.type = NVIF_OUTP_V0_TYPE_SOR;
+ args->v0.tmds.dual = (outp->info.tmdsconf.sor.link == 3);
+ } else {
+ args->v0.type = NVIF_OUTP_V0_TYPE_PIOR;
+ args->v0.tmds.dual = 0;
+ }
+ args->v0.proto = NVIF_OUTP_V0_PROTO_TMDS;
+ break;
+ case DCB_OUTPUT_LVDS:
+ args->v0.type = NVIF_OUTP_V0_TYPE_SOR;
+ args->v0.proto = NVIF_OUTP_V0_PROTO_LVDS;
+ args->v0.lvds.acpi_edid = outp->info.lvdsconf.use_acpi_for_edid;
+ break;
+ case DCB_OUTPUT_DP:
+ if (!outp->info.location) {
+ args->v0.type = NVIF_OUTP_V0_TYPE_SOR;
+ args->v0.dp.aux = outp->info.i2c_index;
+ } else {
+ args->v0.type = NVIF_OUTP_V0_TYPE_PIOR;
+ args->v0.dp.aux = NVKM_I2C_AUX_EXT(outp->info.extdev);
+ }
+ args->v0.proto = NVIF_OUTP_V0_PROTO_DP;
+ args->v0.dp.mst = outp->dp.mst;
+ args->v0.dp.increased_wm = outp->dp.increased_wm;
+ args->v0.dp.link_nr = outp->info.dpconf.link_nr;
+ args->v0.dp.link_bw = outp->info.dpconf.link_bw * 27000;
+ break;
+ default:
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto done;
+ }
+
+ if (outp->info.location)
+ args->v0.ddc = NVKM_I2C_BUS_EXT(outp->info.extdev);
+ else
+ args->v0.ddc = outp->info.i2c_index;
+ args->v0.heads = outp->info.heads;
+ args->v0.conn = outp->info.connector;
+
nvkm_object_ctor(&nvkm_uoutp, oclass, &outp->object);
*pobject = &outp->object;
ret = 0;
}
+
+done:
spin_unlock(&disp->client.lock);
return ret;
}
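The new per-protocol methods are reached from the DRM half of the driver through plain nvif_mthd() calls on the output object. A hedged sketch of what a caller of NVIF_OUTP_V0_DP_AUX_PWR could look like (outp here is the nvif-side struct nvif_outp, the variable names are illustrative, and the union name nvif_outp_dp_aux_pwr_args is assumed to follow the naming of the other method args; the handler above checks argc against sizeof(args->v0)):

	union nvif_outp_dp_aux_pwr_args args = {};
	int ret;

	args.v0.version = 0;
	args.v0.state = 1;	/* power up the AUX channel before DPCD access */

	ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_DP_AUX_PWR,
			&args, sizeof(args.v0));
	if (ret)
		return ret;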
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 3648868bb9fc..c494a1ff2d57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -2032,18 +2032,18 @@ gf100_gr_oneinit(struct nvkm_gr *base)
}
/* Allocate global context buffers. */
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, gr->func->grctx->pagepool_size,
- 0x100, false, &gr->pagepool);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST_SR_LOST,
+ gr->func->grctx->pagepool_size, 0x100, false, &gr->pagepool);
if (ret)
return ret;
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, gr->func->grctx->bundle_size,
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST_SR_LOST, gr->func->grctx->bundle_size,
0x100, false, &gr->bundle_cb);
if (ret)
return ret;
- ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, gr->func->grctx->attrib_cb_size(gr),
- 0x1000, false, &gr->attrib_cb);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST_SR_LOST,
+ gr->func->grctx->attrib_cb_size(gr), 0x1000, false, &gr->attrib_cb);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 8fe0444f761e..131db2645f84 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -462,7 +462,7 @@ nvkm_perfmon_mthd_query_domain(struct nvkm_perfmon *perfmon,
args->v0.id = di;
args->v0.signal_nr = nvkm_perfdom_count_perfsig(dom);
- strncpy(args->v0.name, dom->name, sizeof(args->v0.name) - 1);
+ strscpy(args->v0.name, dom->name, sizeof(args->v0.name));
/* Currently only global counters (PCOUNTER) are implemented
* but this will be different for local counters (MP). */
@@ -513,8 +513,7 @@ nvkm_perfmon_mthd_query_signal(struct nvkm_perfmon *perfmon,
snprintf(args->v0.name, sizeof(args->v0.name),
"/%s/%02x", dom->name, si);
} else {
- strncpy(args->v0.name, sig->name,
- sizeof(args->v0.name) - 1);
+ strscpy(args->v0.name, sig->name, sizeof(args->v0.name));
}
args->v0.signal = si;
@@ -572,7 +571,7 @@ nvkm_perfmon_mthd_query_source(struct nvkm_perfmon *perfmon,
args->v0.source = sig->source[si];
args->v0.mask = src->mask;
- strncpy(args->v0.name, src->name, sizeof(args->v0.name) - 1);
+ strscpy(args->v0.name, src->name, sizeof(args->v0.name));
}
if (++si < source_nr) {
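The strncpy()-to-strscpy() conversions above are a behaviour fix as much as a style one: strncpy() does not NUL-terminate the destination when the source is at least as long as the limit, whereas strscpy() always terminates and reports truncation. A small self-contained illustration (not part of the patch):

	char buf[8];

	/* strncpy(): with an 8-char source and a 7-byte limit, buf holds
	 * "PCOUNTE" with no trailing '\0'; the caller must terminate it.
	 */
	strncpy(buf, "PCOUNTER", sizeof(buf) - 1);

	/* strscpy(): always NUL-terminates and returns -E2BIG on truncation,
	 * so the copy is safe even when the name exactly fills the buffer.
	 */
	if (strscpy(buf, "PCOUNTER", sizeof(buf)) < 0)
		pr_warn("perfdom name truncated\n");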
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
index 81a1ad2c88a7..40997ad1d101 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/tu102.c
@@ -83,17 +83,9 @@ tu102_devinit_wait(struct nvkm_device *device)
}
int
-tu102_devinit_post(struct nvkm_devinit *base, bool post)
+tu102_devinit_post(struct nvkm_devinit *init, bool post)
{
- struct nv50_devinit *init = nv50_devinit(base);
- int ret;
-
- ret = tu102_devinit_wait(init->base.subdev.device);
- if (ret)
- return ret;
-
- gm200_devinit_preos(init, post);
- return 0;
+ return tu102_devinit_wait(init->subdev.device);
}
static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
index e0e4f97be029..24886eabe8dc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/base.c
@@ -94,15 +94,21 @@ nvkm_instobj_wrap(struct nvkm_device *device,
struct nvkm_memory *memory, struct nvkm_memory **pmemory)
{
struct nvkm_instmem *imem = device->imem;
+ int ret;
if (!imem->func->memory_wrap)
return -ENOSYS;
- return imem->func->memory_wrap(imem, memory, pmemory);
+ ret = imem->func->memory_wrap(imem, memory, pmemory);
+ if (ret)
+ return ret;
+
+ container_of(*pmemory, struct nvkm_instobj, memory)->preserve = true;
+ return 0;
}
int
-nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
+nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero, bool preserve,
struct nvkm_memory **pmemory)
{
struct nvkm_subdev *subdev = &imem->subdev;
@@ -130,6 +136,7 @@ nvkm_instobj_new(struct nvkm_instmem *imem, u32 size, u32 align, bool zero,
nvkm_done(memory);
}
+ container_of(memory, struct nvkm_instobj, memory)->preserve = preserve;
done:
if (ret)
nvkm_memory_unref(&memory);
@@ -176,9 +183,11 @@ nvkm_instmem_fini(struct nvkm_subdev *subdev, bool suspend)
if (suspend) {
list_for_each_entry(iobj, &imem->list, head) {
- int ret = nvkm_instobj_save(iobj);
- if (ret)
- return ret;
+ if (iobj->preserve) {
+ int ret = nvkm_instobj_save(iobj);
+ if (ret)
+ return ret;
+ }
}
nvkm_bar_bar2_fini(subdev->device);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
index fe92986a3885..390ca00ab567 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/priv.h
@@ -25,6 +25,7 @@ void nvkm_instmem_boot(struct nvkm_instmem *);
struct nvkm_instobj {
struct nvkm_memory memory;
struct list_head head;
+ bool preserve;
u32 *suspend;
};
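Taken together with the gf100 graphics change earlier in this diff, the new preserve flag makes suspend snapshotting a per-object decision: nvkm_instobj_new() now takes an explicit preserve argument, nvkm_instmem_fini() only calls nvkm_instobj_save() for objects that set it, and large context buffers are allocated with NVKM_MEM_TARGET_INST_SR_LOST so they are simply rebuilt on resume. A rough sketch of the two allocation flavours (variable names are illustrative; how the SR_LOST target is translated into preserve == false happens outside the hunks shown here, so that part is an assumption):

	/* Contents must survive suspend: snapshotted by nvkm_instmem_fini(). */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
			      0x1000, 0x100, true, &pgd);

	/* Cheap to rebuild after resume: skip the slow save/restore copy. */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST_SR_LOST,
			      gr->func->grctx->pagepool_size, 0x100, false,
			      &gr->pagepool);

The net effect is that only state which cannot be regenerated is copied to system memory across suspend.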
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index f3630d0e0d55..bddac77f48f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -558,7 +558,7 @@ gp100_vmm_invalidate_pdb(struct nvkm_vmm *vmm, u64 addr)
void
gp100_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
- u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
+ u32 type = 0;
if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
type |= 0x00000004; /* HUB_ONLY */
type |= 0x00000001; /* PAGE_ALL */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
index 6cb5eefa45e9..0095d58d4d9a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmtu102.c
@@ -27,7 +27,7 @@ static void
tu102_vmm_flush(struct nvkm_vmm *vmm, int depth)
{
struct nvkm_device *device = vmm->mmu->subdev.device;
- u32 type = (5 /* CACHE_LEVEL_UP_TO_PDE3 */ - depth) << 24;
+ u32 type = 0;
type |= 0x00000001; /* PAGE_ALL */
if (atomic_read(&vmm->engref[NVKM_SUBDEV_BAR]))
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index afeeb7737552..b2835b3ea6f5 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -817,6 +817,13 @@ static void pdev_remove(struct platform_device *pdev)
kfree(priv);
}
+static void pdev_shutdown(struct platform_device *pdev)
+{
+ struct omap_drm_private *priv = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(priv->ddev);
+}
+
#ifdef CONFIG_PM_SLEEP
static int omap_drm_suspend(struct device *dev)
{
@@ -846,6 +853,7 @@ static struct platform_driver pdev = {
},
.probe = pdev_probe,
.remove_new = pdev_remove,
+ .shutdown = pdev_shutdown,
};
static struct platform_driver * const drivers[] = {
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
index 075a7af81eff..bcaa63d1955f 100644
--- a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
+++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
@@ -16,7 +16,6 @@ struct tm5p5_nt35596 {
struct mipi_dsi_device *dsi;
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset_gpio;
- bool prepared;
};
static inline struct tm5p5_nt35596 *to_tm5p5_nt35596(struct drm_panel *panel)
@@ -112,9 +111,6 @@ static int tm5p5_nt35596_prepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (ctx->prepared)
- return 0;
-
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
if (ret < 0) {
dev_err(dev, "Failed to enable regulators: %d\n", ret);
@@ -132,7 +128,6 @@ static int tm5p5_nt35596_prepare(struct drm_panel *panel)
return ret;
}
- ctx->prepared = true;
return 0;
}
@@ -142,9 +137,6 @@ static int tm5p5_nt35596_unprepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (!ctx->prepared)
- return 0;
-
ret = tm5p5_nt35596_off(ctx);
if (ret < 0)
dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
@@ -153,7 +145,6 @@ static int tm5p5_nt35596_unprepare(struct drm_panel *panel)
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
ctx->supplies);
- ctx->prepared = false;
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
index 90098b753e3b..e77db8597eb7 100644
--- a/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
+++ b/drivers/gpu/drm/panel/panel-boe-bf060y8m-aj0.c
@@ -34,7 +34,6 @@ struct boe_bf060y8m_aj0 {
struct mipi_dsi_device *dsi;
struct regulator_bulk_data vregs[BF060Y8M_VREG_MAX];
struct gpio_desc *reset_gpio;
- bool prepared;
};
static inline
@@ -129,9 +128,6 @@ static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel)
struct device *dev = &boe->dsi->dev;
int ret;
- if (boe->prepared)
- return 0;
-
/*
* Enable EL Driving Voltage first - doing that at the beginning
* or at the end of the power sequence doesn't matter, so enable
@@ -166,7 +162,6 @@ static int boe_bf060y8m_aj0_prepare(struct drm_panel *panel)
return ret;
}
- boe->prepared = true;
return 0;
err_vci:
@@ -186,9 +181,6 @@ static int boe_bf060y8m_aj0_unprepare(struct drm_panel *panel)
struct device *dev = &boe->dsi->dev;
int ret;
- if (!boe->prepared)
- return 0;
-
ret = boe_bf060y8m_aj0_off(boe);
if (ret < 0)
dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
@@ -196,7 +188,6 @@ static int boe_bf060y8m_aj0_unprepare(struct drm_panel *panel)
gpiod_set_value_cansleep(boe->reset_gpio, 1);
ret = regulator_bulk_disable(ARRAY_SIZE(boe->vregs), boe->vregs);
- boe->prepared = false;
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
index 8912757a6f42..3e0a8e0d58a0 100644
--- a/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
+++ b/drivers/gpu/drm/panel/panel-jdi-fhd-r63452.c
@@ -21,7 +21,6 @@ struct jdi_fhd_r63452 {
struct drm_panel panel;
struct mipi_dsi_device *dsi;
struct gpio_desc *reset_gpio;
- bool prepared;
};
static inline struct jdi_fhd_r63452 *to_jdi_fhd_r63452(struct drm_panel *panel)
@@ -157,9 +156,6 @@ static int jdi_fhd_r63452_prepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (ctx->prepared)
- return 0;
-
jdi_fhd_r63452_reset(ctx);
ret = jdi_fhd_r63452_on(ctx);
@@ -169,7 +165,6 @@ static int jdi_fhd_r63452_prepare(struct drm_panel *panel)
return ret;
}
- ctx->prepared = true;
return 0;
}
@@ -179,16 +174,12 @@ static int jdi_fhd_r63452_unprepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (!ctx->prepared)
- return 0;
-
ret = jdi_fhd_r63452_off(ctx);
if (ret < 0)
dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
- ctx->prepared = false;
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35950.c b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
index 412ca84d0581..648ce9201426 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35950.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35950.c
@@ -59,7 +59,6 @@ struct nt35950 {
int cur_mode;
u8 last_page;
- bool prepared;
};
struct nt35950_panel_mode {
@@ -431,9 +430,6 @@ static int nt35950_prepare(struct drm_panel *panel)
struct device *dev = &nt->dsi[0]->dev;
int ret;
- if (nt->prepared)
- return 0;
-
ret = regulator_enable(nt->vregs[0].consumer);
if (ret)
return ret;
@@ -460,7 +456,6 @@ static int nt35950_prepare(struct drm_panel *panel)
dev_err(dev, "Failed to initialize panel: %d\n", ret);
goto end;
}
- nt->prepared = true;
end:
if (ret < 0) {
@@ -477,9 +472,6 @@ static int nt35950_unprepare(struct drm_panel *panel)
struct device *dev = &nt->dsi[0]->dev;
int ret;
- if (!nt->prepared)
- return 0;
-
ret = nt35950_off(nt);
if (ret < 0)
dev_err(dev, "Failed to deinitialize panel: %d\n", ret);
@@ -487,7 +479,6 @@ static int nt35950_unprepare(struct drm_panel *panel)
gpiod_set_value_cansleep(nt->reset_gpio, 0);
regulator_bulk_disable(ARRAY_SIZE(nt->vregs), nt->vregs);
- nt->prepared = false;
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
index 9632b9e95b71..9b9a7eb1bc60 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c
@@ -38,8 +38,6 @@ struct panel_info {
struct gpio_desc *reset_gpio;
struct backlight_device *backlight;
struct regulator *vddio;
-
- bool prepared;
};
struct panel_desc {
@@ -1046,9 +1044,6 @@ static int nt36523_prepare(struct drm_panel *panel)
struct panel_info *pinfo = to_panel_info(panel);
int ret;
- if (pinfo->prepared)
- return 0;
-
ret = regulator_enable(pinfo->vddio);
if (ret) {
dev_err(panel->dev, "failed to enable vddio regulator: %d\n", ret);
@@ -1064,8 +1059,6 @@ static int nt36523_prepare(struct drm_panel *panel)
return ret;
}
- pinfo->prepared = true;
-
return 0;
}
@@ -1095,14 +1088,9 @@ static int nt36523_unprepare(struct drm_panel *panel)
{
struct panel_info *pinfo = to_panel_info(panel);
- if (!pinfo->prepared)
- return 0;
-
gpiod_set_value_cansleep(pinfo->reset_gpio, 1);
regulator_disable(pinfo->vddio);
- pinfo->prepared = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 898b892f1143..93183f30d7d6 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -70,7 +70,6 @@ struct otm8009a {
struct gpio_desc *reset_gpio;
struct regulator *supply;
bool prepared;
- bool enabled;
};
static const struct drm_display_mode modes[] = {
@@ -267,9 +266,6 @@ static int otm8009a_disable(struct drm_panel *panel)
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
int ret;
- if (!ctx->enabled)
- return 0; /* This is not an issue so we return 0 here */
-
backlight_disable(ctx->bl_dev);
ret = mipi_dsi_dcs_set_display_off(dsi);
@@ -282,8 +278,6 @@ static int otm8009a_disable(struct drm_panel *panel)
msleep(120);
- ctx->enabled = false;
-
return 0;
}
@@ -291,9 +285,6 @@ static int otm8009a_unprepare(struct drm_panel *panel)
{
struct otm8009a *ctx = panel_to_otm8009a(panel);
- if (!ctx->prepared)
- return 0;
-
if (ctx->reset_gpio) {
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
msleep(20);
@@ -311,9 +302,6 @@ static int otm8009a_prepare(struct drm_panel *panel)
struct otm8009a *ctx = panel_to_otm8009a(panel);
int ret;
- if (ctx->prepared)
- return 0;
-
ret = regulator_enable(ctx->supply);
if (ret < 0) {
dev_err(panel->dev, "failed to enable supply: %d\n", ret);
@@ -341,13 +329,8 @@ static int otm8009a_enable(struct drm_panel *panel)
{
struct otm8009a *ctx = panel_to_otm8009a(panel);
- if (ctx->enabled)
- return 0;
-
backlight_enable(ctx->bl_dev);
- ctx->enabled = true;
-
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index 5f9b340588fb..7b7fe987e292 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -77,8 +77,6 @@ struct rm68200 {
struct drm_panel panel;
struct gpio_desc *reset_gpio;
struct regulator *supply;
- bool prepared;
- bool enabled;
};
static const struct drm_display_mode default_mode = {
@@ -231,27 +229,12 @@ static void rm68200_init_sequence(struct rm68200 *ctx)
dcs_write_seq(ctx, MCS_CMD_MODE_SW, MCS_CMD1_UCS);
}
-static int rm68200_disable(struct drm_panel *panel)
-{
- struct rm68200 *ctx = panel_to_rm68200(panel);
-
- if (!ctx->enabled)
- return 0;
-
- ctx->enabled = false;
-
- return 0;
-}
-
static int rm68200_unprepare(struct drm_panel *panel)
{
struct rm68200 *ctx = panel_to_rm68200(panel);
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
int ret;
- if (!ctx->prepared)
- return 0;
-
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret)
dev_warn(panel->dev, "failed to set display off: %d\n", ret);
@@ -269,8 +252,6 @@ static int rm68200_unprepare(struct drm_panel *panel)
regulator_disable(ctx->supply);
- ctx->prepared = false;
-
return 0;
}
@@ -280,9 +261,6 @@ static int rm68200_prepare(struct drm_panel *panel)
struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
int ret;
- if (ctx->prepared)
- return 0;
-
ret = regulator_enable(ctx->supply);
if (ret < 0) {
dev_err(ctx->dev, "failed to enable supply: %d\n", ret);
@@ -310,20 +288,6 @@ static int rm68200_prepare(struct drm_panel *panel)
msleep(20);
- ctx->prepared = true;
-
- return 0;
-}
-
-static int rm68200_enable(struct drm_panel *panel)
-{
- struct rm68200 *ctx = panel_to_rm68200(panel);
-
- if (ctx->enabled)
- return 0;
-
- ctx->enabled = true;
-
return 0;
}
@@ -352,10 +316,8 @@ static int rm68200_get_modes(struct drm_panel *panel,
}
static const struct drm_panel_funcs rm68200_drm_funcs = {
- .disable = rm68200_disable,
.unprepare = rm68200_unprepare,
.prepare = rm68200_prepare,
- .enable = rm68200_enable,
.get_modes = rm68200_get_modes,
};
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index b34fa4d5de07..a0e5698275a5 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -270,9 +270,6 @@ struct s6e63m0 {
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset_gpio;
- bool prepared;
- bool enabled;
-
/*
* This field is tested by functions directly accessing bus before
* transfer, transfer is skipped if it is set. In case of transfer
@@ -502,9 +499,6 @@ static int s6e63m0_disable(struct drm_panel *panel)
{
struct s6e63m0 *ctx = panel_to_s6e63m0(panel);
- if (!ctx->enabled)
- return 0;
-
backlight_disable(ctx->bl_dev);
s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF);
@@ -512,8 +506,6 @@ static int s6e63m0_disable(struct drm_panel *panel)
s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE);
msleep(120);
- ctx->enabled = false;
-
return 0;
}
@@ -522,17 +514,12 @@ static int s6e63m0_unprepare(struct drm_panel *panel)
struct s6e63m0 *ctx = panel_to_s6e63m0(panel);
int ret;
- if (!ctx->prepared)
- return 0;
-
s6e63m0_clear_error(ctx);
ret = s6e63m0_power_off(ctx);
if (ret < 0)
return ret;
- ctx->prepared = false;
-
return 0;
}
@@ -541,9 +528,6 @@ static int s6e63m0_prepare(struct drm_panel *panel)
struct s6e63m0 *ctx = panel_to_s6e63m0(panel);
int ret;
- if (ctx->prepared)
- return 0;
-
ret = s6e63m0_power_on(ctx);
if (ret < 0)
return ret;
@@ -564,8 +548,6 @@ static int s6e63m0_prepare(struct drm_panel *panel)
if (ret < 0)
s6e63m0_unprepare(panel);
- ctx->prepared = true;
-
return ret;
}
@@ -573,9 +555,6 @@ static int s6e63m0_enable(struct drm_panel *panel)
{
struct s6e63m0 *ctx = panel_to_s6e63m0(panel);
- if (ctx->enabled)
- return 0;
-
s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE);
msleep(120);
s6e63m0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON);
@@ -588,8 +567,6 @@ static int s6e63m0_enable(struct drm_panel *panel)
backlight_enable(ctx->bl_dev);
- ctx->enabled = true;
-
return 0;
}
@@ -709,8 +686,6 @@ int s6e63m0_probe(struct device *dev, void *trsp,
dev_set_drvdata(dev, ctx);
ctx->dev = dev;
- ctx->enabled = false;
- ctx->prepared = false;
ret = device_property_read_u32(dev, "max-brightness", &max_brightness);
if (ret)
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
index 7431cae7427e..d2df227abbea 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -18,8 +18,6 @@ struct s6e88a0_ams452ef01 {
struct mipi_dsi_device *dsi;
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset_gpio;
-
- bool prepared;
};
static inline struct
@@ -115,9 +113,6 @@ static int s6e88a0_ams452ef01_prepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (ctx->prepared)
- return 0;
-
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
if (ret < 0) {
dev_err(dev, "Failed to enable regulators: %d\n", ret);
@@ -135,7 +130,6 @@ static int s6e88a0_ams452ef01_prepare(struct drm_panel *panel)
return ret;
}
- ctx->prepared = true;
return 0;
}
@@ -145,9 +139,6 @@ static int s6e88a0_ams452ef01_unprepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (!ctx->prepared)
- return 0;
-
ret = s6e88a0_ams452ef01_off(ctx);
if (ret < 0)
dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
@@ -155,7 +146,6 @@ static int s6e88a0_ams452ef01_unprepare(struct drm_panel *panel)
gpiod_set_value_cansleep(ctx->reset_gpio, 0);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- ctx->prepared = false;
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-samsung-sofef00.c b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
index cbf9607dd576..04ce925b3d9d 100644
--- a/drivers/gpu/drm/panel/panel-samsung-sofef00.c
+++ b/drivers/gpu/drm/panel/panel-samsung-sofef00.c
@@ -23,7 +23,6 @@ struct sofef00_panel {
struct regulator *supply;
struct gpio_desc *reset_gpio;
const struct drm_display_mode *mode;
- bool prepared;
};
static inline
@@ -113,9 +112,6 @@ static int sofef00_panel_prepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (ctx->prepared)
- return 0;
-
ret = regulator_enable(ctx->supply);
if (ret < 0) {
dev_err(dev, "Failed to enable regulator: %d\n", ret);
@@ -131,7 +127,6 @@ static int sofef00_panel_prepare(struct drm_panel *panel)
return ret;
}
- ctx->prepared = true;
return 0;
}
@@ -141,16 +136,12 @@ static int sofef00_panel_unprepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (!ctx->prepared)
- return 0;
-
ret = sofef00_panel_off(ctx);
if (ret < 0)
dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
regulator_disable(ctx->supply);
- ctx->prepared = false;
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
index 68f52eaaf4fa..74c760ee0c2d 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls060t1sx01.c
@@ -24,7 +24,6 @@ struct sharp_ls060 {
struct regulator *avdd_supply;
struct regulator *avee_supply;
struct gpio_desc *reset_gpio;
- bool prepared;
};
static inline struct sharp_ls060 *to_sharp_ls060(struct drm_panel *panel)
@@ -101,9 +100,6 @@ static int sharp_ls060_prepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (ctx->prepared)
- return 0;
-
ret = regulator_enable(ctx->vddi_supply);
if (ret < 0)
return ret;
@@ -134,8 +130,6 @@ static int sharp_ls060_prepare(struct drm_panel *panel)
goto err_on;
}
- ctx->prepared = true;
-
return 0;
err_on:
@@ -163,9 +157,6 @@ static int sharp_ls060_unprepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (!ctx->prepared)
- return 0;
-
ret = sharp_ls060_off(ctx);
if (ret < 0)
dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
@@ -181,7 +172,6 @@ static int sharp_ls060_unprepare(struct drm_panel *panel)
regulator_disable(ctx->vddi_supply);
- ctx->prepared = false;
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
index 1bde2f01786b..472195d4bbbe 100644
--- a/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
+++ b/drivers/gpu/drm/panel/panel-sony-td4353-jdi.c
@@ -36,7 +36,6 @@ struct sony_td4353_jdi {
struct regulator_bulk_data supplies[3];
struct gpio_desc *panel_reset_gpio;
struct gpio_desc *touch_reset_gpio;
- bool prepared;
int type;
};
@@ -150,9 +149,6 @@ static int sony_td4353_jdi_prepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (ctx->prepared)
- return 0;
-
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
if (ret < 0) {
dev_err(dev, "Failed to enable regulators: %d\n", ret);
@@ -171,7 +167,6 @@ static int sony_td4353_jdi_prepare(struct drm_panel *panel)
return ret;
}
- ctx->prepared = true;
return 0;
}
@@ -181,9 +176,6 @@ static int sony_td4353_jdi_unprepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (!ctx->prepared)
- return 0;
-
ret = sony_td4353_jdi_off(ctx);
if (ret < 0)
dev_err(dev, "Failed to power off panel: %d\n", ret);
@@ -191,7 +183,6 @@ static int sony_td4353_jdi_unprepare(struct drm_panel *panel)
sony_td4353_assert_reset_gpios(ctx, 0);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- ctx->prepared = false;
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
index ee5d20ecc577..6d44970dccd9 100644
--- a/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
+++ b/drivers/gpu/drm/panel/panel-sony-tulip-truly-nt35521.c
@@ -23,8 +23,6 @@ struct truly_nt35521 {
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset_gpio;
struct gpio_desc *blen_gpio;
- bool prepared;
- bool enabled;
};
static inline
@@ -296,9 +294,6 @@ static int truly_nt35521_prepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (ctx->prepared)
- return 0;
-
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
if (ret < 0) {
dev_err(dev, "Failed to enable regulators: %d\n", ret);
@@ -314,7 +309,6 @@ static int truly_nt35521_prepare(struct drm_panel *panel)
return ret;
}
- ctx->prepared = true;
return 0;
}
@@ -324,9 +318,6 @@ static int truly_nt35521_unprepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (!ctx->prepared)
- return 0;
-
ret = truly_nt35521_off(ctx);
if (ret < 0)
dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
@@ -335,7 +326,6 @@ static int truly_nt35521_unprepare(struct drm_panel *panel)
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
ctx->supplies);
- ctx->prepared = false;
return 0;
}
@@ -343,12 +333,8 @@ static int truly_nt35521_enable(struct drm_panel *panel)
{
struct truly_nt35521 *ctx = to_truly_nt35521(panel);
- if (ctx->enabled)
- return 0;
-
gpiod_set_value_cansleep(ctx->blen_gpio, 1);
- ctx->enabled = true;
return 0;
}
@@ -356,12 +342,8 @@ static int truly_nt35521_disable(struct drm_panel *panel)
{
struct truly_nt35521 *ctx = to_truly_nt35521(panel);
- if (!ctx->enabled)
- return 0;
-
gpiod_set_value_cansleep(ctx->blen_gpio, 0);
- ctx->enabled = false;
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c b/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c
index 6e77a2d71d81..0156689f41cd 100644
--- a/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c
+++ b/drivers/gpu/drm/panel/panel-startek-kd070fhfid015.c
@@ -35,7 +35,6 @@ enum {
};
struct stk_panel {
- bool prepared;
const struct drm_display_mode *mode;
struct backlight_device *backlight;
struct drm_panel base;
@@ -145,16 +144,11 @@ static int stk_panel_unprepare(struct drm_panel *panel)
{
struct stk_panel *stk = to_stk_panel(panel);
- if (!stk->prepared)
- return 0;
-
stk_panel_off(stk);
regulator_bulk_disable(ARRAY_SIZE(stk->supplies), stk->supplies);
gpiod_set_value(stk->reset_gpio, 0);
gpiod_set_value(stk->enable_gpio, 1);
- stk->prepared = false;
-
return 0;
}
@@ -164,9 +158,6 @@ static int stk_panel_prepare(struct drm_panel *panel)
struct device *dev = &stk->dsi->dev;
int ret;
- if (stk->prepared)
- return 0;
-
gpiod_set_value(stk->reset_gpio, 0);
gpiod_set_value(stk->enable_gpio, 0);
ret = regulator_enable(stk->supplies[IOVCC].consumer);
@@ -195,8 +186,6 @@ static int stk_panel_prepare(struct drm_panel *panel)
goto poweroff;
}
- stk->prepared = true;
-
return 0;
poweroff:
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index 4f4009f9fe25..b73448cf349d 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -64,8 +64,6 @@ struct truly_nt35597 {
struct mipi_dsi_device *dsi[2];
const struct nt35597_config *config;
- bool prepared;
- bool enabled;
};
static inline struct truly_nt35597 *panel_to_ctx(struct drm_panel *panel)
@@ -313,16 +311,12 @@ static int truly_nt35597_disable(struct drm_panel *panel)
struct truly_nt35597 *ctx = panel_to_ctx(panel);
int ret;
- if (!ctx->enabled)
- return 0;
-
if (ctx->backlight) {
ret = backlight_disable(ctx->backlight);
if (ret < 0)
dev_err(ctx->dev, "backlight disable failed %d\n", ret);
}
- ctx->enabled = false;
return 0;
}
@@ -331,9 +325,6 @@ static int truly_nt35597_unprepare(struct drm_panel *panel)
struct truly_nt35597 *ctx = panel_to_ctx(panel);
int ret = 0;
- if (!ctx->prepared)
- return 0;
-
ctx->dsi[0]->mode_flags = 0;
ctx->dsi[1]->mode_flags = 0;
@@ -354,7 +345,6 @@ static int truly_nt35597_unprepare(struct drm_panel *panel)
if (ret < 0)
dev_err(ctx->dev, "power_off failed ret = %d\n", ret);
- ctx->prepared = false;
return ret;
}
@@ -367,9 +357,6 @@ static int truly_nt35597_prepare(struct drm_panel *panel)
const struct nt35597_config *config;
u32 num_cmds;
- if (ctx->prepared)
- return 0;
-
ret = truly_35597_power_on(ctx);
if (ret < 0)
return ret;
@@ -409,8 +396,6 @@ static int truly_nt35597_prepare(struct drm_panel *panel)
/* Per DSI spec wait 120ms after sending set_display_on DCS command */
msleep(120);
- ctx->prepared = true;
-
return 0;
power_off:
@@ -424,17 +409,12 @@ static int truly_nt35597_enable(struct drm_panel *panel)
struct truly_nt35597 *ctx = panel_to_ctx(panel);
int ret;
- if (ctx->enabled)
- return 0;
-
if (ctx->backlight) {
ret = backlight_enable(ctx->backlight);
if (ret < 0)
dev_err(ctx->dev, "backlight enable failed %d\n", ret);
}
- ctx->enabled = true;
-
return 0;
}
diff --git a/drivers/gpu/drm/panel/panel-visionox-r66451.c b/drivers/gpu/drm/panel/panel-visionox-r66451.c
index 00fc28ad3d07..fbb73464de33 100644
--- a/drivers/gpu/drm/panel/panel-visionox-r66451.c
+++ b/drivers/gpu/drm/panel/panel-visionox-r66451.c
@@ -22,7 +22,6 @@ struct visionox_r66451 {
struct mipi_dsi_device *dsi;
struct gpio_desc *reset_gpio;
struct regulator_bulk_data supplies[2];
- bool prepared, enabled;
};
static inline struct visionox_r66451 *to_visionox_r66451(struct drm_panel *panel)
@@ -124,9 +123,6 @@ static int visionox_r66451_prepare(struct drm_panel *panel)
struct device *dev = &dsi->dev;
int ret;
- if (ctx->prepared)
- return 0;
-
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies),
ctx->supplies);
if (ret < 0)
@@ -144,7 +140,6 @@ static int visionox_r66451_prepare(struct drm_panel *panel)
mipi_dsi_compression_mode(ctx->dsi, true);
- ctx->prepared = true;
return 0;
}
@@ -154,9 +149,6 @@ static int visionox_r66451_unprepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (!ctx->prepared)
- return 0;
-
ret = visionox_r66451_off(ctx);
if (ret < 0)
dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
@@ -164,7 +156,6 @@ static int visionox_r66451_unprepare(struct drm_panel *panel)
gpiod_set_value_cansleep(ctx->reset_gpio, 1);
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- ctx->prepared = false;
return 0;
}
@@ -190,9 +181,6 @@ static int visionox_r66451_enable(struct drm_panel *panel)
struct drm_dsc_picture_parameter_set pps;
int ret;
- if (ctx->enabled)
- return 0;
-
if (!dsi->dsc) {
dev_err(&dsi->dev, "DSC not attached to DSI\n");
return -ENODEV;
@@ -219,8 +207,6 @@ static int visionox_r66451_enable(struct drm_panel *panel)
}
msleep(20);
- ctx->enabled = true;
-
return 0;
}
@@ -231,8 +217,6 @@ static int visionox_r66451_disable(struct drm_panel *panel)
struct device *dev = &dsi->dev;
int ret;
- ctx->enabled = false;
-
ret = mipi_dsi_dcs_set_display_off(dsi);
if (ret < 0) {
dev_err(dev, "Failed to set display off: %d\n", ret);
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
index c2806e4fd553..775144695283 100644
--- a/drivers/gpu/drm/panel/panel-visionox-rm69299.c
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -20,8 +20,6 @@ struct visionox_rm69299 {
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset_gpio;
struct mipi_dsi_device *dsi;
- bool prepared;
- bool enabled;
};
static inline struct visionox_rm69299 *panel_to_ctx(struct drm_panel *panel)
@@ -80,7 +78,6 @@ static int visionox_rm69299_unprepare(struct drm_panel *panel)
ret = visionox_rm69299_power_off(ctx);
- ctx->prepared = false;
return ret;
}
@@ -89,9 +86,6 @@ static int visionox_rm69299_prepare(struct drm_panel *panel)
struct visionox_rm69299 *ctx = panel_to_ctx(panel);
int ret;
- if (ctx->prepared)
- return 0;
-
ret = visionox_rm69299_power_on(ctx);
if (ret < 0)
return ret;
@@ -140,8 +134,6 @@ static int visionox_rm69299_prepare(struct drm_panel *panel)
/* Per DSI spec wait 120ms after sending set_display_on DCS command */
msleep(120);
- ctx->prepared = true;
-
return 0;
power_off:
diff --git a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
index bb0dfd86ea67..a23407b9f6fb 100644
--- a/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
+++ b/drivers/gpu/drm/panel/panel-visionox-vtdr6130.c
@@ -20,7 +20,6 @@ struct visionox_vtdr6130 {
struct mipi_dsi_device *dsi;
struct gpio_desc *reset_gpio;
struct regulator_bulk_data supplies[3];
- bool prepared;
};
static inline struct visionox_vtdr6130 *to_visionox_vtdr6130(struct drm_panel *panel)
@@ -157,9 +156,6 @@ static int visionox_vtdr6130_prepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (ctx->prepared)
- return 0;
-
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies),
ctx->supplies);
if (ret < 0)
@@ -175,7 +171,6 @@ static int visionox_vtdr6130_prepare(struct drm_panel *panel)
return ret;
}
- ctx->prepared = true;
return 0;
}
@@ -185,9 +180,6 @@ static int visionox_vtdr6130_unprepare(struct drm_panel *panel)
struct device *dev = &ctx->dsi->dev;
int ret;
- if (!ctx->prepared)
- return 0;
-
ret = visionox_vtdr6130_off(ctx);
if (ret < 0)
dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
@@ -196,7 +188,6 @@ static int visionox_vtdr6130_unprepare(struct drm_panel *panel)
regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
- ctx->prepared = false;
return 0;
}
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index ba3b5b5f0cdf..02e6b74d5016 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -323,12 +323,18 @@ static void pl111_amba_remove(struct amba_device *amba_dev)
struct pl111_drm_dev_private *priv = drm->dev_private;
drm_dev_unregister(drm);
+ drm_atomic_helper_shutdown(drm);
if (priv->panel)
drm_panel_bridge_remove(priv->bridge);
drm_dev_put(drm);
of_reserved_mem_device_release(dev);
}
+static void pl111_amba_shutdown(struct amba_device *amba_dev)
+{
+ drm_atomic_helper_shutdown(amba_get_drvdata(amba_dev));
+}
+
/*
* This early variant lacks the 565 and 444 pixel formats.
*/
@@ -431,6 +437,7 @@ static struct amba_driver pl111_amba_driver __maybe_unused = {
},
.probe = pl111_amba_probe,
.remove = pl111_amba_remove,
+ .shutdown = pl111_amba_shutdown,
.id_table = pl111_id_table,
};
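Most of the driver hunks in this series wire up the same thing: a bus-level .shutdown callback that quiesces the display pipeline by calling drm_atomic_helper_shutdown() on the drm_device stored in drvdata. A minimal sketch of that pattern for a hypothetical platform driver (all "foo" names are placeholders, not identifiers from these patches):

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <linux/platform_device.h>

static void foo_platform_shutdown(struct platform_device *pdev)
{
	struct drm_device *drm = platform_get_drvdata(pdev);

	/*
	 * Defensive NULL check: some probe error paths in this series
	 * (see the vc4 and tilcdc hunks below) clear drvdata on failure.
	 */
	if (drm)
		drm_atomic_helper_shutdown(drm);
}

/* Wired into the driver definition as:  .shutdown = foo_platform_shutdown,  */

PCI-bound drivers such as qxl, bochs and cirrus follow the same sketch with pci_get_drvdata() in their struct pci_driver .shutdown callback.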
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index b30ede1cf62d..a4144c62ca93 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -163,6 +163,12 @@ qxl_pci_remove(struct pci_dev *pdev)
vga_put(pdev, VGA_RSRC_LEGACY_IO);
}
+static void
+qxl_pci_shutdown(struct pci_dev *pdev)
+{
+ drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
+}
+
DEFINE_DRM_GEM_FOPS(qxl_fops);
static int qxl_drm_freeze(struct drm_device *dev)
@@ -269,6 +275,7 @@ static struct pci_driver qxl_pci_driver = {
.id_table = pciidlist,
.probe = qxl_pci_probe,
.remove = qxl_pci_remove,
+ .shutdown = qxl_pci_shutdown,
.driver.pm = &qxl_pm_ops,
};
diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
index 3b4dde09538a..6dcf3e041113 100644
--- a/drivers/gpu/drm/solomon/ssd130x.c
+++ b/drivers/gpu/drm/solomon/ssd130x.c
@@ -141,14 +141,23 @@ const struct ssd130x_deviceinfo ssd130x_variants[] = {
};
EXPORT_SYMBOL_NS_GPL(ssd130x_variants, DRM_SSD130X);
+struct ssd130x_crtc_state {
+ struct drm_crtc_state base;
+ /* Buffer to store pixels in HW format and written to the panel */
+ u8 *data_array;
+};
+
struct ssd130x_plane_state {
struct drm_shadow_plane_state base;
/* Intermediate buffer to convert pixels from XRGB8888 to HW format */
u8 *buffer;
- /* Buffer to store pixels in HW format and written to the panel */
- u8 *data_array;
};
+static inline struct ssd130x_crtc_state *to_ssd130x_crtc_state(struct drm_crtc_state *state)
+{
+ return container_of(state, struct ssd130x_crtc_state, base);
+}
+
static inline struct ssd130x_plane_state *to_ssd130x_plane_state(struct drm_plane_state *state)
{
return container_of(state, struct ssd130x_plane_state, base.base);
@@ -448,13 +457,11 @@ static int ssd130x_init(struct ssd130x_device *ssd130x)
}
static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
- struct ssd130x_plane_state *ssd130x_state,
- struct drm_rect *rect)
+ struct drm_rect *rect, u8 *buf,
+ u8 *data_array)
{
unsigned int x = rect->x1;
unsigned int y = rect->y1;
- u8 *buf = ssd130x_state->buffer;
- u8 *data_array = ssd130x_state->data_array;
unsigned int width = drm_rect_width(rect);
unsigned int height = drm_rect_height(rect);
unsigned int line_length = DIV_ROUND_UP(width, 8);
@@ -550,12 +557,10 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
return ret;
}
-static void ssd130x_clear_screen(struct ssd130x_device *ssd130x,
- struct ssd130x_plane_state *ssd130x_state)
+static void ssd130x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
{
unsigned int page_height = ssd130x->device_info->page_height;
unsigned int pages = DIV_ROUND_UP(ssd130x->height, page_height);
- u8 *data_array = ssd130x_state->data_array;
unsigned int width = ssd130x->width;
int ret, i;
@@ -594,15 +599,13 @@ static void ssd130x_clear_screen(struct ssd130x_device *ssd130x,
}
}
-static int ssd130x_fb_blit_rect(struct drm_plane_state *state,
+static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb,
const struct iosys_map *vmap,
- struct drm_rect *rect)
+ struct drm_rect *rect,
+ u8 *buf, u8 *data_array)
{
- struct drm_framebuffer *fb = state->fb;
struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
unsigned int page_height = ssd130x->device_info->page_height;
- struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(state);
- u8 *buf = ssd130x_state->buffer;
struct iosys_map dst;
unsigned int dst_pitch;
int ret = 0;
@@ -622,24 +625,31 @@ static int ssd130x_fb_blit_rect(struct drm_plane_state *state,
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
- ssd130x_update_rect(ssd130x, ssd130x_state, rect);
+ ssd130x_update_rect(ssd130x, rect, buf, data_array);
return ret;
}
-static int ssd130x_primary_plane_helper_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
+static int ssd130x_primary_plane_atomic_check(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
struct drm_device *drm = plane->dev;
struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state);
- unsigned int page_height = ssd130x->device_info->page_height;
- unsigned int pages = DIV_ROUND_UP(ssd130x->height, page_height);
+ struct drm_crtc *crtc = plane_state->crtc;
+ struct drm_crtc_state *crtc_state;
const struct drm_format_info *fi;
unsigned int pitch;
int ret;
+ if (!crtc)
+ return -EINVAL;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
ret = drm_plane_helper_atomic_check(plane, state);
if (ret)
return ret;
@@ -654,23 +664,19 @@ static int ssd130x_primary_plane_helper_atomic_check(struct drm_plane *plane,
if (!ssd130x_state->buffer)
return -ENOMEM;
- ssd130x_state->data_array = kcalloc(ssd130x->width, pages, GFP_KERNEL);
- if (!ssd130x_state->data_array) {
- kfree(ssd130x_state->buffer);
- /* Set to prevent a double free in .atomic_destroy_state() */
- ssd130x_state->buffer = NULL;
- return -ENOMEM;
- }
-
return 0;
}
-static void ssd130x_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
+static void ssd130x_primary_plane_atomic_update(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
+ struct ssd130x_crtc_state *ssd130x_crtc_state = to_ssd130x_crtc_state(crtc_state);
+ struct ssd130x_plane_state *ssd130x_plane_state = to_ssd130x_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
struct drm_atomic_helper_damage_iter iter;
struct drm_device *drm = plane->dev;
struct drm_rect dst_clip;
@@ -687,24 +693,34 @@ static void ssd130x_primary_plane_helper_atomic_update(struct drm_plane *plane,
if (!drm_rect_intersect(&dst_clip, &damage))
continue;
- ssd130x_fb_blit_rect(plane_state, &shadow_plane_state->data[0], &dst_clip);
+ ssd130x_fb_blit_rect(fb, &shadow_plane_state->data[0], &dst_clip,
+ ssd130x_plane_state->buffer,
+ ssd130x_crtc_state->data_array);
}
drm_dev_exit(idx);
}
-static void ssd130x_primary_plane_helper_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
+static void ssd130x_primary_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_atomic_state *state)
{
struct drm_device *drm = plane->dev;
struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
- struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane->state);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_crtc_state *crtc_state;
+ struct ssd130x_crtc_state *ssd130x_crtc_state;
int idx;
+ if (!plane_state->crtc)
+ return;
+
+ crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
+ ssd130x_crtc_state = to_ssd130x_crtc_state(crtc_state);
+
if (!drm_dev_enter(drm, &idx))
return;
- ssd130x_clear_screen(ssd130x, ssd130x_state);
+ ssd130x_clear_screen(ssd130x, ssd130x_crtc_state->data_array);
drm_dev_exit(idx);
}
@@ -737,9 +753,8 @@ static struct drm_plane_state *ssd130x_primary_plane_duplicate_state(struct drm_
if (!ssd130x_state)
return NULL;
- /* The buffers are not duplicated and are allocated in .atomic_check */
+ /* The buffer is not duplicated and is allocated in .atomic_check */
ssd130x_state->buffer = NULL;
- ssd130x_state->data_array = NULL;
new_shadow_plane_state = &ssd130x_state->base;
@@ -753,7 +768,6 @@ static void ssd130x_primary_plane_destroy_state(struct drm_plane *plane,
{
struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(state);
- kfree(ssd130x_state->data_array);
kfree(ssd130x_state->buffer);
__drm_gem_destroy_shadow_plane_state(&ssd130x_state->base);
@@ -763,9 +777,9 @@ static void ssd130x_primary_plane_destroy_state(struct drm_plane *plane,
static const struct drm_plane_helper_funcs ssd130x_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
- .atomic_check = ssd130x_primary_plane_helper_atomic_check,
- .atomic_update = ssd130x_primary_plane_helper_atomic_update,
- .atomic_disable = ssd130x_primary_plane_helper_atomic_disable,
+ .atomic_check = ssd130x_primary_plane_atomic_check,
+ .atomic_update = ssd130x_primary_plane_atomic_update,
+ .atomic_disable = ssd130x_primary_plane_atomic_disable,
};
static const struct drm_plane_funcs ssd130x_primary_plane_funcs = {
@@ -777,8 +791,8 @@ static const struct drm_plane_funcs ssd130x_primary_plane_funcs = {
.destroy = drm_plane_cleanup,
};
-static enum drm_mode_status ssd130x_crtc_helper_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
+static enum drm_mode_status ssd130x_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(crtc->dev);
@@ -793,27 +807,96 @@ static enum drm_mode_status ssd130x_crtc_helper_mode_valid(struct drm_crtc *crtc
return MODE_OK;
}
+static int ssd130x_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = crtc->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct ssd130x_crtc_state *ssd130x_state = to_ssd130x_crtc_state(crtc_state);
+ unsigned int page_height = ssd130x->device_info->page_height;
+ unsigned int pages = DIV_ROUND_UP(ssd130x->height, page_height);
+ int ret;
+
+ ret = drm_crtc_helper_atomic_check(crtc, state);
+ if (ret)
+ return ret;
+
+ ssd130x_state->data_array = kmalloc(ssd130x->width * pages, GFP_KERNEL);
+ if (!ssd130x_state->data_array)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/* Called during init to allocate the CRTC's atomic state. */
+static void ssd130x_crtc_reset(struct drm_crtc *crtc)
+{
+ struct ssd130x_crtc_state *ssd130x_state;
+
+ WARN_ON(crtc->state);
+
+ ssd130x_state = kzalloc(sizeof(*ssd130x_state), GFP_KERNEL);
+ if (!ssd130x_state)
+ return;
+
+ __drm_atomic_helper_crtc_reset(crtc, &ssd130x_state->base);
+}
+
+static struct drm_crtc_state *ssd130x_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct ssd130x_crtc_state *old_ssd130x_state;
+ struct ssd130x_crtc_state *ssd130x_state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ old_ssd130x_state = to_ssd130x_crtc_state(crtc->state);
+ ssd130x_state = kmemdup(old_ssd130x_state, sizeof(*ssd130x_state), GFP_KERNEL);
+ if (!ssd130x_state)
+ return NULL;
+
+ /* The buffer is not duplicated and is allocated in .atomic_check */
+ ssd130x_state->data_array = NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &ssd130x_state->base);
+
+ return &ssd130x_state->base;
+}
+
+static void ssd130x_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct ssd130x_crtc_state *ssd130x_state = to_ssd130x_crtc_state(state);
+
+ kfree(ssd130x_state->data_array);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ kfree(ssd130x_state);
+}
+
/*
* The CRTC is always enabled. Screen updates are performed by
* the primary plane's atomic_update function. Disabling clears
* the screen in the primary plane's atomic_disable function.
*/
static const struct drm_crtc_helper_funcs ssd130x_crtc_helper_funcs = {
- .mode_valid = ssd130x_crtc_helper_mode_valid,
- .atomic_check = drm_crtc_helper_atomic_check,
+ .mode_valid = ssd130x_crtc_mode_valid,
+ .atomic_check = ssd130x_crtc_atomic_check,
};
static const struct drm_crtc_funcs ssd130x_crtc_funcs = {
- .reset = drm_atomic_helper_crtc_reset,
+ .reset = ssd130x_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .atomic_duplicate_state = ssd130x_crtc_duplicate_state,
+ .atomic_destroy_state = ssd130x_crtc_destroy_state,
};
-static void ssd130x_encoder_helper_atomic_enable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+static void ssd130x_encoder_atomic_enable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_device *drm = encoder->dev;
struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
@@ -838,8 +921,8 @@ power_off:
return;
}
-static void ssd130x_encoder_helper_atomic_disable(struct drm_encoder *encoder,
- struct drm_atomic_state *state)
+static void ssd130x_encoder_atomic_disable(struct drm_encoder *encoder,
+ struct drm_atomic_state *state)
{
struct drm_device *drm = encoder->dev;
struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
@@ -852,15 +935,15 @@ static void ssd130x_encoder_helper_atomic_disable(struct drm_encoder *encoder,
}
static const struct drm_encoder_helper_funcs ssd130x_encoder_helper_funcs = {
- .atomic_enable = ssd130x_encoder_helper_atomic_enable,
- .atomic_disable = ssd130x_encoder_helper_atomic_disable,
+ .atomic_enable = ssd130x_encoder_atomic_enable,
+ .atomic_disable = ssd130x_encoder_atomic_disable,
};
static const struct drm_encoder_funcs ssd130x_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
-static int ssd130x_connector_helper_get_modes(struct drm_connector *connector)
+static int ssd130x_connector_get_modes(struct drm_connector *connector)
{
struct ssd130x_device *ssd130x = drm_to_ssd130x(connector->dev);
struct drm_display_mode *mode;
@@ -880,7 +963,7 @@ static int ssd130x_connector_helper_get_modes(struct drm_connector *connector)
}
static const struct drm_connector_helper_funcs ssd130x_connector_helper_funcs = {
- .get_modes = ssd130x_connector_helper_get_modes,
+ .get_modes = ssd130x_connector_get_modes,
};
static const struct drm_connector_funcs ssd130x_connector_funcs = {
@@ -1162,6 +1245,7 @@ EXPORT_SYMBOL_GPL(ssd130x_probe);
void ssd130x_remove(struct ssd130x_device *ssd130x)
{
drm_dev_unplug(&ssd130x->drm);
+ drm_atomic_helper_shutdown(&ssd130x->drm);
}
EXPORT_SYMBOL_GPL(ssd130x_remove);
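The ssd130x hunks above move the data_array buffer out of the plane state and into a driver-private CRTC state, which is why the driver now provides the full custom atomic-state lifecycle (.reset, .atomic_duplicate_state, .atomic_destroy_state). A condensed sketch of that subclassing pattern, with placeholder "foo" names and a generic scratch buffer standing in for data_array:

#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>
#include <linux/container_of.h>
#include <linux/slab.h>

struct foo_crtc_state {
	struct drm_crtc_state base;	/* core state must be embedded first */
	u8 *scratch;			/* allocated in .atomic_check, freed below */
};

static inline struct foo_crtc_state *to_foo_crtc_state(struct drm_crtc_state *state)
{
	return container_of(state, struct foo_crtc_state, base);
}

static void foo_crtc_reset(struct drm_crtc *crtc)
{
	struct foo_crtc_state *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (foo)
		__drm_atomic_helper_crtc_reset(crtc, &foo->base);
}

static struct drm_crtc_state *foo_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct foo_crtc_state *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;

	/* The scratch buffer is not copied; the next .atomic_check allocates it. */
	__drm_atomic_helper_crtc_duplicate_state(crtc, &foo->base);
	return &foo->base;
}

static void foo_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	struct foo_crtc_state *foo = to_foo_crtc_state(state);

	kfree(foo->scratch);
	__drm_atomic_helper_crtc_destroy_state(state);
	kfree(foo);
}

As in ssd130x_crtc_atomic_check() above, the buffer itself is allocated during the CRTC's .atomic_check, so a failed allocation simply fails the commit instead of the update path.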
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 2390c1bb6596..4bab93c4fefd 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -174,6 +174,7 @@ static void sti_cleanup(struct drm_device *ddev)
drm_atomic_helper_shutdown(ddev);
drm_mode_config_cleanup(ddev);
component_unbind_all(ddev->dev, ddev);
+ dev_set_drvdata(ddev->dev, NULL);
kfree(private);
ddev->dev_private = NULL;
}
@@ -253,6 +254,11 @@ static void sti_platform_remove(struct platform_device *pdev)
component_master_del(&pdev->dev, &sti_ops);
}
+static void sti_platform_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
static const struct of_device_id sti_dt_ids[] = {
{ .compatible = "st,sti-display-subsystem", },
{ /* end node */ },
@@ -262,6 +268,7 @@ MODULE_DEVICE_TABLE(of, sti_dt_ids);
static struct platform_driver sti_platform_driver = {
.probe = sti_platform_probe,
.remove_new = sti_platform_remove,
+ .shutdown = sti_platform_shutdown,
.driver = {
.name = DRIVER_NAME,
.of_match_table = sti_dt_ids,
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index c68c831136c9..e8523abef27a 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -114,6 +114,7 @@ static void drv_unload(struct drm_device *ddev)
DRM_DEBUG("%s\n", __func__);
drm_kms_helper_poll_fini(ddev);
+ drm_atomic_helper_shutdown(ddev);
ltdc_unload(ddev);
}
@@ -225,6 +226,11 @@ static void stm_drm_platform_remove(struct platform_device *pdev)
drm_dev_put(ddev);
}
+static void stm_drm_platform_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
static const struct of_device_id drv_dt_ids[] = {
{ .compatible = "st,stm32-ltdc"},
{ /* end node */ },
@@ -234,6 +240,7 @@ MODULE_DEVICE_TABLE(of, drv_dt_ids);
static struct platform_driver stm_drm_platform_driver = {
.probe = stm_drm_platform_probe,
.remove_new = stm_drm_platform_remove,
+ .shutdown = stm_drm_platform_shutdown,
.driver = {
.name = "stm32-display",
.of_match_table = drv_dt_ids,
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 6a8dfc022d3c..35d7a7ffd208 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -413,6 +413,11 @@ static void sun4i_drv_remove(struct platform_device *pdev)
component_master_del(&pdev->dev, &sun4i_drv_master_ops);
}
+static void sun4i_drv_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun4i-a10-display-engine" },
{ .compatible = "allwinner,sun5i-a10s-display-engine" },
@@ -437,6 +442,7 @@ MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
static struct platform_driver sun4i_drv_platform_driver = {
.probe = sun4i_drv_probe,
.remove_new = sun4i_drv_remove,
+ .shutdown = sun4i_drv_shutdown,
.driver = {
.name = "sun4i-drm",
.of_match_table = sun4i_drv_of_table,
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 79566c9ea8ff..fc66bbd913b2 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -99,6 +99,7 @@ static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
{
+ struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct tegra_output *output = encoder_to_output(encoder);
struct tegra_rgb *rgb = to_rgb(output);
u32 value;
@@ -108,10 +109,19 @@ static void tegra_rgb_encoder_enable(struct drm_encoder *encoder)
value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
tegra_dc_writel(rgb->dc, value, DC_DISP_DATA_ENABLE_OPTIONS);
- /* XXX: parameterize? */
+ /* configure H- and V-sync signal polarities */
value = tegra_dc_readl(rgb->dc, DC_COM_PIN_OUTPUT_POLARITY(1));
- value &= ~LVS_OUTPUT_POLARITY_LOW;
- value &= ~LHS_OUTPUT_POLARITY_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ value |= LHS_OUTPUT_POLARITY_LOW;
+ else
+ value &= ~LHS_OUTPUT_POLARITY_LOW;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ value |= LVS_OUTPUT_POLARITY_LOW;
+ else
+ value &= ~LVS_OUTPUT_POLARITY_LOW;
+
tegra_dc_writel(rgb->dc, value, DC_COM_PIN_OUTPUT_POLARITY(1));
/* XXX: parameterize? */
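The tegra-rgb hunk stops hard-coding active-high sync levels and instead derives the output polarities from the adjusted mode's flags. The mapping it applies, sketched here with placeholder register bits (the real driver uses LHS_OUTPUT_POLARITY_LOW and LVS_OUTPUT_POLARITY_LOW):

#include <drm/drm_modes.h>
#include <linux/bits.h>

#define FOO_HSYNC_ACTIVE_LOW	BIT(0)	/* placeholder bits, not Tegra's */
#define FOO_VSYNC_ACTIVE_LOW	BIT(1)

static u32 foo_apply_sync_polarity(const struct drm_display_mode *mode, u32 value)
{
	/* The DRM_MODE_FLAG_NHSYNC/NVSYNC flags mean "sync pulse is active low". */
	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
		value |= FOO_HSYNC_ACTIVE_LOW;
	else
		value &= ~FOO_HSYNC_ACTIVE_LOW;

	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
		value |= FOO_VSYNC_ACTIVE_LOW;
	else
		value &= ~FOO_VSYNC_ACTIVE_LOW;

	return value;
}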
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index fe56beea3e93..8ebd7134ee21 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -175,6 +175,7 @@ static void tilcdc_fini(struct drm_device *dev)
drm_dev_unregister(dev);
drm_kms_helper_poll_fini(dev);
+ drm_atomic_helper_shutdown(dev);
tilcdc_irq_uninstall(dev);
drm_mode_config_cleanup(dev);
@@ -389,6 +390,7 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
init_failed:
tilcdc_fini(ddev);
+ platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -537,7 +539,8 @@ static void tilcdc_unbind(struct device *dev)
if (!ddev->dev_private)
return;
- tilcdc_fini(dev_get_drvdata(dev));
+ tilcdc_fini(ddev);
+ dev_set_drvdata(dev, NULL);
}
static const struct component_master_ops tilcdc_comp_ops = {
@@ -582,6 +585,11 @@ static int tilcdc_pdev_remove(struct platform_device *pdev)
return 0;
}
+static void tilcdc_pdev_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
static const struct of_device_id tilcdc_of_match[] = {
{ .compatible = "ti,am33xx-tilcdc", },
{ .compatible = "ti,da850-tilcdc", },
@@ -592,6 +600,7 @@ MODULE_DEVICE_TABLE(of, tilcdc_of_match);
static struct platform_driver tilcdc_platform_driver = {
.probe = tilcdc_pdev_probe,
.remove = tilcdc_pdev_remove,
+ .shutdown = tilcdc_pdev_shutdown,
.driver = {
.name = "tilcdc",
.pm = pm_sleep_ptr(&tilcdc_pm_ops),
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index d254679a136e..c23c9f0cf49c 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -690,6 +690,11 @@ static void bochs_pci_remove(struct pci_dev *pdev)
drm_dev_put(dev);
}
+static void bochs_pci_shutdown(struct pci_dev *pdev)
+{
+ drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
+}
+
static const struct pci_device_id bochs_pci_tbl[] = {
{
.vendor = 0x1234,
@@ -720,6 +725,7 @@ static struct pci_driver bochs_pci_driver = {
.id_table = bochs_pci_tbl,
.probe = bochs_pci_probe,
.remove = bochs_pci_remove,
+ .shutdown = bochs_pci_shutdown,
.driver.pm = &bochs_pm_ops,
};
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index 594bc472862f..c5c34cd2edc1 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -727,6 +727,11 @@ static void cirrus_pci_remove(struct pci_dev *pdev)
drm_atomic_helper_shutdown(dev);
}
+static void cirrus_pci_shutdown(struct pci_dev *pdev)
+{
+ drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
+}
+
static const struct pci_device_id pciidlist[] = {
{
.vendor = PCI_VENDOR_ID_CIRRUS,
@@ -748,6 +753,7 @@ static struct pci_driver cirrus_pci_driver = {
.id_table = pciidlist,
.probe = cirrus_pci_probe,
.remove = cirrus_pci_remove,
+ .shutdown = cirrus_pci_shutdown,
};
drm_module_pci_driver(cirrus_pci_driver)
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
index ff86ba1ae1b8..9c597461d1e2 100644
--- a/drivers/gpu/drm/tiny/simpledrm.c
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -6,6 +6,7 @@
#include <linux/of_address.h>
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_aperture.h>
@@ -227,6 +228,12 @@ struct simpledrm_device {
unsigned int regulator_count;
struct regulator **regulators;
#endif
+ /* power-domains */
+#if defined CONFIG_OF && defined CONFIG_PM_GENERIC_DOMAINS
+ int pwr_dom_count;
+ struct device **pwr_dom_devs;
+ struct device_link **pwr_dom_links;
+#endif
/* simplefb settings */
struct drm_display_mode mode;
@@ -468,6 +475,101 @@ static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
}
#endif
+#if defined CONFIG_OF && defined CONFIG_PM_GENERIC_DOMAINS
+/*
+ * Generic power domain handling code.
+ *
+ * Here we handle the power-domains property of our "simple-framebuffer"
+ * dt node. This is only necessary if there is more than one power-domain.
+ * A single power-domain is handled automatically by the driver core. Multiple
+ * power-domains have to be handled by drivers since the driver core can't know
+ * the correct power sequencing. Power sequencing is not an issue for simpledrm
+ * since the bootloader has already put the power domains in the correct state.
+ * simpledrm only has to ensure they remain active for its lifetime.
+ *
+ * When the driver unloads, we detach from the power-domains.
+ *
+ * We only complain about errors here; no action is taken, as the most likely
+ * error can only happen due to a mismatch between the bootloader, which set
+ * up the "simple-framebuffer" dt node, and the PM domain providers in the
+ * device tree. Chances are that there are no adverse effects, and if there are,
+ * a clean teardown of the fb probe will not help us much either. So just
+ * complain and carry on, and hope that the user actually gets a working fb at
+ * the end of things.
+ */
+static void simpledrm_device_detach_genpd(void *res)
+{
+ int i;
+ struct simpledrm_device *sdev = res;
+
+ if (sdev->pwr_dom_count <= 1)
+ return;
+
+ for (i = sdev->pwr_dom_count - 1; i >= 0; i--) {
+ if (sdev->pwr_dom_links[i])
+ device_link_del(sdev->pwr_dom_links[i]);
+ if (!IS_ERR_OR_NULL(sdev->pwr_dom_devs[i]))
+ dev_pm_domain_detach(sdev->pwr_dom_devs[i], true);
+ }
+}
+
+static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
+{
+ struct device *dev = sdev->dev.dev;
+ int i;
+
+ sdev->pwr_dom_count = of_count_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells");
+ /*
+ * Single power-domain devices are handled by the driver core; nothing to do
+ * here. The same applies to device nodes without a "power-domains" property.
+ */
+ if (sdev->pwr_dom_count <= 1)
+ return 0;
+
+ sdev->pwr_dom_devs = devm_kcalloc(dev, sdev->pwr_dom_count,
+ sizeof(*sdev->pwr_dom_devs),
+ GFP_KERNEL);
+ if (!sdev->pwr_dom_devs)
+ return -ENOMEM;
+
+ sdev->pwr_dom_links = devm_kcalloc(dev, sdev->pwr_dom_count,
+ sizeof(*sdev->pwr_dom_links),
+ GFP_KERNEL);
+ if (!sdev->pwr_dom_links)
+ return -ENOMEM;
+
+ for (i = 0; i < sdev->pwr_dom_count; i++) {
+ sdev->pwr_dom_devs[i] = dev_pm_domain_attach_by_id(dev, i);
+ if (IS_ERR(sdev->pwr_dom_devs[i])) {
+ int ret = PTR_ERR(sdev->pwr_dom_devs[i]);
+ if (ret == -EPROBE_DEFER) {
+ simpledrm_device_detach_genpd(sdev);
+ return ret;
+ }
+ drm_warn(&sdev->dev,
+ "pm_domain_attach_by_id(%u) failed: %d\n", i, ret);
+ continue;
+ }
+
+ sdev->pwr_dom_links[i] = device_link_add(dev,
+ sdev->pwr_dom_devs[i],
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME |
+ DL_FLAG_RPM_ACTIVE);
+ if (!sdev->pwr_dom_links[i])
+ drm_warn(&sdev->dev, "failed to link power-domain %d\n", i);
+ }
+
+ return devm_add_action_or_reset(dev, simpledrm_device_detach_genpd, sdev);
+}
+#else
+static int simpledrm_device_attach_genpd(struct simpledrm_device *sdev)
+{
+ return 0;
+}
+#endif
+
/*
* Modesetting
*/
@@ -653,6 +755,9 @@ static struct simpledrm_device *simpledrm_device_create(struct drm_driver *drv,
ret = simpledrm_device_init_regulators(sdev);
if (ret)
return ERR_PTR(ret);
+ ret = simpledrm_device_attach_genpd(sdev);
+ if (ret)
+ return ERR_PTR(ret);
if (pd) {
width = simplefb_get_width_pd(dev, pd);
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 0bb56d063536..acce210e2554 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -242,6 +242,7 @@ static void tve200_remove(struct platform_device *pdev)
struct tve200_drm_dev_private *priv = drm->dev_private;
drm_dev_unregister(drm);
+ drm_atomic_helper_shutdown(drm);
if (priv->panel)
drm_panel_bridge_remove(priv->bridge);
drm_mode_config_cleanup(drm);
@@ -249,6 +250,11 @@ static void tve200_remove(struct platform_device *pdev)
drm_dev_put(drm);
}
+static void tve200_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
static const struct of_device_id tve200_of_match[] = {
{
.compatible = "faraday,tve200",
@@ -263,6 +269,7 @@ static struct platform_driver tve200_driver = {
},
.probe = tve200_probe,
.remove_new = tve200_remove,
+ .shutdown = tve200_shutdown,
};
drm_module_platform_driver(tve200_driver);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 4fee15c97c34..047b95812334 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -12,6 +12,7 @@
#include <linux/vt_kern.h>
#include <drm/drm_aperture.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
@@ -97,11 +98,19 @@ static void vbox_pci_remove(struct pci_dev *pdev)
struct vbox_private *vbox = pci_get_drvdata(pdev);
drm_dev_unregister(&vbox->ddev);
+ drm_atomic_helper_shutdown(&vbox->ddev);
vbox_irq_fini(vbox);
vbox_mode_fini(vbox);
vbox_hw_fini(vbox);
}
+static void vbox_pci_shutdown(struct pci_dev *pdev)
+{
+ struct vbox_private *vbox = pci_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(&vbox->ddev);
+}
+
static int vbox_pm_suspend(struct device *dev)
{
struct vbox_private *vbox = dev_get_drvdata(dev);
@@ -165,6 +174,7 @@ static struct pci_driver vbox_pci_driver = {
.id_table = pciidlist,
.probe = vbox_pci_probe,
.remove = vbox_pci_remove,
+ .shutdown = vbox_pci_shutdown,
.driver.pm = pm_sleep_ptr(&vbox_pm_ops),
};
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 1b3531374967..c133e96b8aca 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -324,21 +324,21 @@ static int vc4_drm_bind(struct device *dev)
if (!is_vc5) {
ret = drmm_mutex_init(drm, &vc4->bin_bo_lock);
if (ret)
- return ret;
+ goto err;
ret = vc4_bo_cache_init(drm);
if (ret)
- return ret;
+ goto err;
}
ret = drmm_mode_config_init(drm);
if (ret)
- return ret;
+ goto err;
if (!is_vc5) {
ret = vc4_gem_init(drm);
if (ret)
- return ret;
+ goto err;
}
node = of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware");
@@ -346,13 +346,15 @@ static int vc4_drm_bind(struct device *dev)
firmware = rpi_firmware_get(node);
of_node_put(node);
- if (!firmware)
- return -EPROBE_DEFER;
+ if (!firmware) {
+ ret = -EPROBE_DEFER;
+ goto err;
+ }
}
ret = drm_aperture_remove_framebuffers(driver);
if (ret)
- return ret;
+ goto err;
if (firmware) {
ret = rpi_firmware_property(firmware,
@@ -366,32 +368,33 @@ static int vc4_drm_bind(struct device *dev)
ret = component_bind_all(dev, drm);
if (ret)
- return ret;
+ goto err;
ret = devm_add_action_or_reset(dev, vc4_component_unbind_all, vc4);
if (ret)
- return ret;
+ goto err;
ret = vc4_plane_create_additional_planes(drm);
if (ret)
- goto unbind_all;
+ goto err;
ret = vc4_kms_load(drm);
if (ret < 0)
- goto unbind_all;
+ goto err;
drm_for_each_crtc(crtc, drm)
vc4_crtc_disable_at_boot(crtc);
ret = drm_dev_register(drm, 0);
if (ret < 0)
- goto unbind_all;
+ goto err;
drm_fbdev_dma_setup(drm, 16);
return 0;
-unbind_all:
+err:
+ platform_set_drvdata(pdev, NULL);
return ret;
}
@@ -401,6 +404,7 @@ static void vc4_drm_unbind(struct device *dev)
drm_dev_unplug(drm);
drm_atomic_helper_shutdown(drm);
+ dev_set_drvdata(dev, NULL);
}
static const struct component_master_ops vc4_drm_ops = {
@@ -444,6 +448,11 @@ static void vc4_platform_drm_remove(struct platform_device *pdev)
component_master_del(&pdev->dev, &vc4_drm_ops);
}
+static void vc4_platform_drm_shutdown(struct platform_device *pdev)
+{
+ drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+}
+
static const struct of_device_id vc4_of_match[] = {
{ .compatible = "brcm,bcm2711-vc5", },
{ .compatible = "brcm,bcm2835-vc4", },
@@ -455,6 +464,7 @@ MODULE_DEVICE_TABLE(of, vc4_of_match);
static struct platform_driver vc4_platform_driver = {
.probe = vc4_platform_drm_probe,
.remove_new = vc4_platform_drm_remove,
+ .shutdown = vc4_platform_drm_shutdown,
.driver = {
.name = "vc4-drm",
.of_match_table = vc4_of_match,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
index c0da89e16e6f..a07e5b7e2f2f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c
@@ -232,6 +232,7 @@ static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused)
list_for_each_entry(file, &dev->filelist, lhead) {
struct task_struct *task;
struct drm_gem_object *gobj;
+ struct pid *pid;
int id;
/*
@@ -241,8 +242,9 @@ static int vmw_debugfs_gem_info_show(struct seq_file *m, void *unused)
* Therefore, we need to protect this ->comm access using RCU.
*/
rcu_read_lock();
- task = pid_task(file->pid, PIDTYPE_TGID);
- seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
+ pid = rcu_dereference(file->pid);
+ task = pid_task(pid, PIDTYPE_TGID);
+ seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
task ? task->comm : "<unknown>");
rcu_read_unlock();
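The vmwgfx hunk replaces a plain read of file->pid with an RCU-protected dereference. The read-side pattern it uses, condensed into a placeholder helper (illustrative only, not code from this series):

#include <drm/drm_file.h>
#include <linux/pid.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

static void foo_show_owner(struct seq_file *m, struct drm_file *file)
{
	struct task_struct *task;
	struct pid *pid;

	rcu_read_lock();
	/* Both the pid pointer and task->comm are only stable under RCU. */
	pid = rcu_dereference(file->pid);
	task = pid_task(pid, PIDTYPE_TGID);
	seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
		   task ? task->comm : "<unknown>");
	rcu_read_unlock();
}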
diff --git a/drivers/video/fbdev/au1200fb.c b/drivers/video/fbdev/au1200fb.c
index c137d6afe484..98afd385c49c 100644
--- a/drivers/video/fbdev/au1200fb.c
+++ b/drivers/video/fbdev/au1200fb.c
@@ -1719,15 +1719,6 @@ static int au1200fb_drv_probe(struct platform_device *dev)
}
au1200fb_fb_set_par(fbi);
-
-#if !defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_LOGO)
- if (plane == 0)
- if (fb_prepare_logo(fbi, FB_ROTATE_UR)) {
- /* Start display and show logo on boot */
- fb_set_cmap(&fbi->cmap, fbi);
- fb_show_logo(fbi, FB_ROTATE_UR);
- }
-#endif
}
/* Now hook interrupt too */
diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
index edfde2948e5c..36d3156dc759 100644
--- a/drivers/video/fbdev/core/Makefile
+++ b/drivers/video/fbdev/core/Makefile
@@ -23,6 +23,8 @@ fb-y += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \
endif
endif
+fb-$(CONFIG_LOGO) += fb_logo.o
+
obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o
obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o
obj-$(CONFIG_FB_CFB_IMAGEBLIT) += cfbimgblt.o
diff --git a/drivers/video/fbdev/core/fb_internal.h b/drivers/video/fbdev/core/fb_internal.h
index 4c8d509a0026..613832d335fe 100644
--- a/drivers/video/fbdev/core/fb_internal.h
+++ b/drivers/video/fbdev/core/fb_internal.h
@@ -20,6 +20,23 @@ static inline void fb_unregister_chrdev(void)
{ }
#endif
+/* fb_logo.c */
+#if defined(CONFIG_LOGO)
+extern bool fb_center_logo;
+extern int fb_logo_count;
+int fb_prepare_logo(struct fb_info *fb_info, int rotate);
+int fb_show_logo(struct fb_info *fb_info, int rotate);
+#else
+static inline int fb_prepare_logo(struct fb_info *info, int rotate)
+{
+ return 0;
+}
+static inline int fb_show_logo(struct fb_info *info, int rotate)
+{
+ return 0;
+}
+#endif /* CONFIG_LOGO */
+
/* fbmem.c */
extern struct class *fb_class;
extern struct mutex registration_lock;
diff --git a/drivers/video/fbdev/core/fb_logo.c b/drivers/video/fbdev/core/fb_logo.c
new file mode 100644
index 000000000000..0bab8352b684
--- /dev/null
+++ b/drivers/video/fbdev/core/fb_logo.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/fb.h>
+#include <linux/linux_logo.h>
+
+#include "fb_internal.h"
+
+bool fb_center_logo __read_mostly;
+int fb_logo_count __read_mostly = -1;
+
+static inline unsigned int safe_shift(unsigned int d, int n)
+{
+ return n < 0 ? d >> -n : d << n;
+}
+
+static void fb_set_logocmap(struct fb_info *info,
+ const struct linux_logo *logo)
+{
+ struct fb_cmap palette_cmap;
+ u16 palette_green[16];
+ u16 palette_blue[16];
+ u16 palette_red[16];
+ int i, j, n;
+ const unsigned char *clut = logo->clut;
+
+ palette_cmap.start = 0;
+ palette_cmap.len = 16;
+ palette_cmap.red = palette_red;
+ palette_cmap.green = palette_green;
+ palette_cmap.blue = palette_blue;
+ palette_cmap.transp = NULL;
+
+ for (i = 0; i < logo->clutsize; i += n) {
+ n = logo->clutsize - i;
+ /* palette_cmap provides space for only 16 colors at once */
+ if (n > 16)
+ n = 16;
+ palette_cmap.start = 32 + i;
+ palette_cmap.len = n;
+ for (j = 0; j < n; ++j) {
+ palette_cmap.red[j] = clut[0] << 8 | clut[0];
+ palette_cmap.green[j] = clut[1] << 8 | clut[1];
+ palette_cmap.blue[j] = clut[2] << 8 | clut[2];
+ clut += 3;
+ }
+ fb_set_cmap(&palette_cmap, info);
+ }
+}
+
+static void fb_set_logo_truepalette(struct fb_info *info,
+ const struct linux_logo *logo,
+ u32 *palette)
+{
+ static const unsigned char mask[] = {
+ 0, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff
+ };
+ unsigned char redmask, greenmask, bluemask;
+ int redshift, greenshift, blueshift;
+ int i;
+ const unsigned char *clut = logo->clut;
+
+ /*
+ * We have to create a temporary palette since console palette is only
+ * 16 colors long.
+ */
+ /* Bug: Doesn't obey msb_right ... (who needs that?) */
+ redmask = mask[info->var.red.length < 8 ? info->var.red.length : 8];
+ greenmask = mask[info->var.green.length < 8 ? info->var.green.length : 8];
+ bluemask = mask[info->var.blue.length < 8 ? info->var.blue.length : 8];
+ redshift = info->var.red.offset - (8 - info->var.red.length);
+ greenshift = info->var.green.offset - (8 - info->var.green.length);
+ blueshift = info->var.blue.offset - (8 - info->var.blue.length);
+
+ for (i = 0; i < logo->clutsize; i++) {
+ palette[i+32] = (safe_shift((clut[0] & redmask), redshift) |
+ safe_shift((clut[1] & greenmask), greenshift) |
+ safe_shift((clut[2] & bluemask), blueshift));
+ clut += 3;
+ }
+}
+
+static void fb_set_logo_directpalette(struct fb_info *info,
+ const struct linux_logo *logo,
+ u32 *palette)
+{
+ int redshift, greenshift, blueshift;
+ int i;
+
+ redshift = info->var.red.offset;
+ greenshift = info->var.green.offset;
+ blueshift = info->var.blue.offset;
+
+ for (i = 32; i < 32 + logo->clutsize; i++)
+ palette[i] = i << redshift | i << greenshift | i << blueshift;
+}
+
+static void fb_set_logo(struct fb_info *info,
+ const struct linux_logo *logo, u8 *dst,
+ int depth)
+{
+ int i, j, k;
+ const u8 *src = logo->data;
+ u8 xor = (info->fix.visual == FB_VISUAL_MONO01) ? 0xff : 0;
+ u8 fg = 1, d;
+
+ switch (fb_get_color_depth(&info->var, &info->fix)) {
+ case 1:
+ fg = 1;
+ break;
+ case 2:
+ fg = 3;
+ break;
+ default:
+ fg = 7;
+ break;
+ }
+
+ if (info->fix.visual == FB_VISUAL_MONO01 ||
+ info->fix.visual == FB_VISUAL_MONO10)
+ fg = ~((u8) (0xfff << info->var.green.length));
+
+ switch (depth) {
+ case 4:
+ for (i = 0; i < logo->height; i++)
+ for (j = 0; j < logo->width; src++) {
+ *dst++ = *src >> 4;
+ j++;
+ if (j < logo->width) {
+ *dst++ = *src & 0x0f;
+ j++;
+ }
+ }
+ break;
+ case 1:
+ for (i = 0; i < logo->height; i++) {
+ for (j = 0; j < logo->width; src++) {
+ d = *src ^ xor;
+ for (k = 7; k >= 0 && j < logo->width; k--) {
+ *dst++ = ((d >> k) & 1) ? fg : 0;
+ j++;
+ }
+ }
+ }
+ break;
+ }
+}
+
+/*
+ * Three (3) kinds of logo maps exist. linux_logo_clut224 (>16 colors),
+ * linux_logo_vga16 (16 colors) and linux_logo_mono (2 colors). Depending on
+ * the visual format and color depth of the framebuffer, the DAC, the
+ * pseudo_palette, and the logo data will be adjusted accordingly.
+ *
+ * Case 1 - linux_logo_clut224:
+ * Color exceeds the number of console colors (16), thus we set the hardware DAC
+ * using fb_set_cmap() appropriately. The "needs_cmapreset" flag will be set.
+ *
+ * For visuals that require color info from the pseudo_palette, we also construct
+ * one for temporary use. The "needs_directpalette" or "needs_truepalette" flags
+ * will be set.
+ *
+ * Case 2 - linux_logo_vga16:
+ * The number of colors just matches the console colors, thus there is no need
+ * to set the DAC or the pseudo_palette. However, the bitmap is packed, i.e.,
+ * each byte contains color information for two pixels (upper and lower nibble).
+ * To be consistent with fb_imageblit() usage, we therefore separate the two
+ * nibbles into separate bytes. The "depth" flag will be set to 4.
+ *
+ * Case 3 - linux_logo_mono:
+ * This is similar to Case 2. Each byte contains information for 8 pixels.
+ * We isolate each bit and expand each into a byte. The "depth" flag will
+ * be set to 1.
+ */
+static struct logo_data {
+ int depth;
+ int needs_directpalette;
+ int needs_truepalette;
+ int needs_cmapreset;
+ const struct linux_logo *logo;
+} fb_logo __read_mostly;
+
+static void fb_rotate_logo_ud(const u8 *in, u8 *out, u32 width, u32 height)
+{
+ u32 size = width * height, i;
+
+ out += size - 1;
+
+ for (i = size; i--; )
+ *out-- = *in++;
+}
+
+static void fb_rotate_logo_cw(const u8 *in, u8 *out, u32 width, u32 height)
+{
+ int i, j, h = height - 1;
+
+ for (i = 0; i < height; i++)
+ for (j = 0; j < width; j++)
+ out[height * j + h - i] = *in++;
+}
+
+static void fb_rotate_logo_ccw(const u8 *in, u8 *out, u32 width, u32 height)
+{
+ int i, j, w = width - 1;
+
+ for (i = 0; i < height; i++)
+ for (j = 0; j < width; j++)
+ out[height * (w - j) + i] = *in++;
+}
+
+static void fb_rotate_logo(struct fb_info *info, u8 *dst,
+ struct fb_image *image, int rotate)
+{
+ u32 tmp;
+
+ if (rotate == FB_ROTATE_UD) {
+ fb_rotate_logo_ud(image->data, dst, image->width,
+ image->height);
+ image->dx = info->var.xres - image->width - image->dx;
+ image->dy = info->var.yres - image->height - image->dy;
+ } else if (rotate == FB_ROTATE_CW) {
+ fb_rotate_logo_cw(image->data, dst, image->width,
+ image->height);
+ swap(image->width, image->height);
+ tmp = image->dy;
+ image->dy = image->dx;
+ image->dx = info->var.xres - image->width - tmp;
+ } else if (rotate == FB_ROTATE_CCW) {
+ fb_rotate_logo_ccw(image->data, dst, image->width,
+ image->height);
+ swap(image->width, image->height);
+ tmp = image->dx;
+ image->dx = image->dy;
+ image->dy = info->var.yres - image->height - tmp;
+ }
+
+ image->data = dst;
+}
+
+static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
+ int rotate, unsigned int num)
+{
+ unsigned int x;
+
+ if (image->width > info->var.xres || image->height > info->var.yres)
+ return;
+
+ if (rotate == FB_ROTATE_UR) {
+ for (x = 0;
+ x < num && image->dx + image->width <= info->var.xres;
+ x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dx += image->width + 8;
+ }
+ } else if (rotate == FB_ROTATE_UD) {
+ u32 dx = image->dx;
+
+ for (x = 0; x < num && image->dx <= dx; x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dx -= image->width + 8;
+ }
+ } else if (rotate == FB_ROTATE_CW) {
+ for (x = 0;
+ x < num && image->dy + image->height <= info->var.yres;
+ x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dy += image->height + 8;
+ }
+ } else if (rotate == FB_ROTATE_CCW) {
+ u32 dy = image->dy;
+
+ for (x = 0; x < num && image->dy <= dy; x++) {
+ info->fbops->fb_imageblit(info, image);
+ image->dy -= image->height + 8;
+ }
+ }
+}
+
+static int fb_show_logo_line(struct fb_info *info, int rotate,
+ const struct linux_logo *logo, int y,
+ unsigned int n)
+{
+ u32 *palette = NULL, *saved_pseudo_palette = NULL;
+ unsigned char *logo_new = NULL, *logo_rotate = NULL;
+ struct fb_image image;
+
+ /* Return if the frame buffer is not mapped or suspended */
+ if (logo == NULL || info->state != FBINFO_STATE_RUNNING ||
+ info->fbops->owner)
+ return 0;
+
+ image.depth = 8;
+ image.data = logo->data;
+
+ if (fb_logo.needs_cmapreset)
+ fb_set_logocmap(info, logo);
+
+ if (fb_logo.needs_truepalette ||
+ fb_logo.needs_directpalette) {
+ palette = kmalloc(256 * 4, GFP_KERNEL);
+ if (palette == NULL)
+ return 0;
+
+ if (fb_logo.needs_truepalette)
+ fb_set_logo_truepalette(info, logo, palette);
+ else
+ fb_set_logo_directpalette(info, logo, palette);
+
+ saved_pseudo_palette = info->pseudo_palette;
+ info->pseudo_palette = palette;
+ }
+
+ if (fb_logo.depth <= 4) {
+ logo_new = kmalloc_array(logo->width, logo->height,
+ GFP_KERNEL);
+ if (logo_new == NULL) {
+ kfree(palette);
+ if (saved_pseudo_palette)
+ info->pseudo_palette = saved_pseudo_palette;
+ return 0;
+ }
+ image.data = logo_new;
+ fb_set_logo(info, logo, logo_new, fb_logo.depth);
+ }
+
+ if (fb_center_logo) {
+ int xres = info->var.xres;
+ int yres = info->var.yres;
+
+ if (rotate == FB_ROTATE_CW || rotate == FB_ROTATE_CCW) {
+ xres = info->var.yres;
+ yres = info->var.xres;
+ }
+
+ while (n && (n * (logo->width + 8) - 8 > xres))
+ --n;
+ image.dx = (xres - (n * (logo->width + 8) - 8)) / 2;
+ image.dy = y ?: (yres - logo->height) / 2;
+ } else {
+ image.dx = 0;
+ image.dy = y;
+ }
+
+ image.width = logo->width;
+ image.height = logo->height;
+
+ if (rotate) {
+ logo_rotate = kmalloc_array(logo->width, logo->height,
+ GFP_KERNEL);
+ if (logo_rotate)
+ fb_rotate_logo(info, logo_rotate, &image, rotate);
+ }
+
+ fb_do_show_logo(info, &image, rotate, n);
+
+ kfree(palette);
+ if (saved_pseudo_palette != NULL)
+ info->pseudo_palette = saved_pseudo_palette;
+ kfree(logo_new);
+ kfree(logo_rotate);
+ return image.dy + logo->height;
+}
+
+#ifdef CONFIG_FB_LOGO_EXTRA
+
+#define FB_LOGO_EX_NUM_MAX 10
+static struct logo_data_extra {
+ const struct linux_logo *logo;
+ unsigned int n;
+} fb_logo_ex[FB_LOGO_EX_NUM_MAX];
+static unsigned int fb_logo_ex_num;
+
+void fb_append_extra_logo(const struct linux_logo *logo, unsigned int n)
+{
+ if (!n || fb_logo_ex_num == FB_LOGO_EX_NUM_MAX)
+ return;
+
+ fb_logo_ex[fb_logo_ex_num].logo = logo;
+ fb_logo_ex[fb_logo_ex_num].n = n;
+ fb_logo_ex_num++;
+}
+
+static int fb_prepare_extra_logos(struct fb_info *info, unsigned int height,
+ unsigned int yres)
+{
+ unsigned int i;
+
+ /* FIXME: logo_ex supports only truecolor fb. */
+ if (info->fix.visual != FB_VISUAL_TRUECOLOR)
+ fb_logo_ex_num = 0;
+
+ for (i = 0; i < fb_logo_ex_num; i++) {
+ if (fb_logo_ex[i].logo->type != fb_logo.logo->type) {
+ fb_logo_ex[i].logo = NULL;
+ continue;
+ }
+ height += fb_logo_ex[i].logo->height;
+ if (height > yres) {
+ height -= fb_logo_ex[i].logo->height;
+ fb_logo_ex_num = i;
+ break;
+ }
+ }
+ return height;
+}
+
+static int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
+{
+ unsigned int i;
+
+ for (i = 0; i < fb_logo_ex_num; i++)
+ y = fb_show_logo_line(info, rotate,
+ fb_logo_ex[i].logo, y, fb_logo_ex[i].n);
+
+ return y;
+}
+#endif /* CONFIG_FB_LOGO_EXTRA */
+
+int fb_prepare_logo(struct fb_info *info, int rotate)
+{
+ int depth = fb_get_color_depth(&info->var, &info->fix);
+ unsigned int yres;
+ int height;
+
+ memset(&fb_logo, 0, sizeof(struct logo_data));
+
+ if (info->flags & FBINFO_MISC_TILEBLITTING ||
+ info->fbops->owner || !fb_logo_count)
+ return 0;
+
+ if (info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
+ depth = info->var.blue.length;
+ if (info->var.red.length < depth)
+ depth = info->var.red.length;
+ if (info->var.green.length < depth)
+ depth = info->var.green.length;
+ }
+
+ if (info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR && depth > 4) {
+ /* assume console colormap */
+ depth = 4;
+ }
+
+ /* Return if no suitable logo was found */
+ fb_logo.logo = fb_find_logo(depth);
+
+ if (!fb_logo.logo)
+ return 0;
+
+ if (rotate == FB_ROTATE_UR || rotate == FB_ROTATE_UD)
+ yres = info->var.yres;
+ else
+ yres = info->var.xres;
+
+ if (fb_logo.logo->height > yres) {
+ fb_logo.logo = NULL;
+ return 0;
+ }
+
+ /* What depth we asked for might be different from what we get */
+ if (fb_logo.logo->type == LINUX_LOGO_CLUT224)
+ fb_logo.depth = 8;
+ else if (fb_logo.logo->type == LINUX_LOGO_VGA16)
+ fb_logo.depth = 4;
+ else
+ fb_logo.depth = 1;
+
+
+ if (fb_logo.depth > 4 && depth > 4) {
+ switch (info->fix.visual) {
+ case FB_VISUAL_TRUECOLOR:
+ fb_logo.needs_truepalette = 1;
+ break;
+ case FB_VISUAL_DIRECTCOLOR:
+ fb_logo.needs_directpalette = 1;
+ fb_logo.needs_cmapreset = 1;
+ break;
+ case FB_VISUAL_PSEUDOCOLOR:
+ fb_logo.needs_cmapreset = 1;
+ break;
+ }
+ }
+
+ height = fb_logo.logo->height;
+ if (fb_center_logo)
+ height += (yres - fb_logo.logo->height) / 2;
+#ifdef CONFIG_FB_LOGO_EXTRA
+ height = fb_prepare_extra_logos(info, height, yres);
+#endif
+
+ return height;
+}
+
+int fb_show_logo(struct fb_info *info, int rotate)
+{
+ unsigned int count;
+ int y;
+
+ if (!fb_logo_count)
+ return 0;
+
+ count = fb_logo_count < 0 ? num_online_cpus() : fb_logo_count;
+ y = fb_show_logo_line(info, rotate, fb_logo.logo, 0, count);
+#ifdef CONFIG_FB_LOGO_EXTRA
+ y = fb_show_extra_logos(info, y, rotate);
+#endif
+
+ return y;
+}
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index f157a5a1dffc..63af6ab034b5 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -472,6 +472,7 @@ static int __init fb_console_setup(char *this_opt)
}
#endif
+#ifdef CONFIG_LOGO
if (!strncmp(options, "logo-pos:", 9)) {
options += 9;
if (!strcmp(options, "center"))
@@ -485,6 +486,7 @@ static int __init fb_console_setup(char *this_opt)
fb_logo_count = simple_strtol(options, &options, 0);
continue;
}
+#endif
}
return 1;
}
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index ee44a46a66be..fc206755f5f6 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -11,30 +11,12 @@
* for more details.
*/
-#include <linux/module.h>
-
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/vt.h>
-#include <linux/init.h>
-#include <linux/linux_logo.h>
-#include <linux/platform_device.h>
#include <linux/console.h>
-#include <linux/kmod.h>
-#include <linux/err.h>
-#include <linux/device.h>
-#include <linux/efi.h>
+#include <linux/export.h>
#include <linux/fb.h>
#include <linux/fbcon.h>
-#include <linux/mem_encrypt.h>
-#include <linux/pci.h>
#include <video/nomodeset.h>
-#include <video/vga.h>
#include "fb_internal.h"
@@ -53,10 +35,6 @@ int num_registered_fb __read_mostly;
for (i = 0; i < FB_MAX; i++) \
if (!registered_fb[i]) {} else
-bool fb_center_logo __read_mostly;
-
-int fb_logo_count __read_mostly = -1;
-
struct fb_info *get_fb_info(unsigned int idx)
{
struct fb_info *fb_info;
@@ -184,524 +162,6 @@ char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size
}
EXPORT_SYMBOL(fb_get_buffer_offset);
-#ifdef CONFIG_LOGO
-
-static inline unsigned safe_shift(unsigned d, int n)
-{
- return n < 0 ? d >> -n : d << n;
-}
-
-static void fb_set_logocmap(struct fb_info *info,
- const struct linux_logo *logo)
-{
- struct fb_cmap palette_cmap;
- u16 palette_green[16];
- u16 palette_blue[16];
- u16 palette_red[16];
- int i, j, n;
- const unsigned char *clut = logo->clut;
-
- palette_cmap.start = 0;
- palette_cmap.len = 16;
- palette_cmap.red = palette_red;
- palette_cmap.green = palette_green;
- palette_cmap.blue = palette_blue;
- palette_cmap.transp = NULL;
-
- for (i = 0; i < logo->clutsize; i += n) {
- n = logo->clutsize - i;
- /* palette_cmap provides space for only 16 colors at once */
- if (n > 16)
- n = 16;
- palette_cmap.start = 32 + i;
- palette_cmap.len = n;
- for (j = 0; j < n; ++j) {
- palette_cmap.red[j] = clut[0] << 8 | clut[0];
- palette_cmap.green[j] = clut[1] << 8 | clut[1];
- palette_cmap.blue[j] = clut[2] << 8 | clut[2];
- clut += 3;
- }
- fb_set_cmap(&palette_cmap, info);
- }
-}
-
-static void fb_set_logo_truepalette(struct fb_info *info,
- const struct linux_logo *logo,
- u32 *palette)
-{
- static const unsigned char mask[] = { 0,0x80,0xc0,0xe0,0xf0,0xf8,0xfc,0xfe,0xff };
- unsigned char redmask, greenmask, bluemask;
- int redshift, greenshift, blueshift;
- int i;
- const unsigned char *clut = logo->clut;
-
- /*
- * We have to create a temporary palette since console palette is only
- * 16 colors long.
- */
- /* Bug: Doesn't obey msb_right ... (who needs that?) */
- redmask = mask[info->var.red.length < 8 ? info->var.red.length : 8];
- greenmask = mask[info->var.green.length < 8 ? info->var.green.length : 8];
- bluemask = mask[info->var.blue.length < 8 ? info->var.blue.length : 8];
- redshift = info->var.red.offset - (8 - info->var.red.length);
- greenshift = info->var.green.offset - (8 - info->var.green.length);
- blueshift = info->var.blue.offset - (8 - info->var.blue.length);
-
- for ( i = 0; i < logo->clutsize; i++) {
- palette[i+32] = (safe_shift((clut[0] & redmask), redshift) |
- safe_shift((clut[1] & greenmask), greenshift) |
- safe_shift((clut[2] & bluemask), blueshift));
- clut += 3;
- }
-}
-
-static void fb_set_logo_directpalette(struct fb_info *info,
- const struct linux_logo *logo,
- u32 *palette)
-{
- int redshift, greenshift, blueshift;
- int i;
-
- redshift = info->var.red.offset;
- greenshift = info->var.green.offset;
- blueshift = info->var.blue.offset;
-
- for (i = 32; i < 32 + logo->clutsize; i++)
- palette[i] = i << redshift | i << greenshift | i << blueshift;
-}
-
-static void fb_set_logo(struct fb_info *info,
- const struct linux_logo *logo, u8 *dst,
- int depth)
-{
- int i, j, k;
- const u8 *src = logo->data;
- u8 xor = (info->fix.visual == FB_VISUAL_MONO01) ? 0xff : 0;
- u8 fg = 1, d;
-
- switch (fb_get_color_depth(&info->var, &info->fix)) {
- case 1:
- fg = 1;
- break;
- case 2:
- fg = 3;
- break;
- default:
- fg = 7;
- break;
- }
-
- if (info->fix.visual == FB_VISUAL_MONO01 ||
- info->fix.visual == FB_VISUAL_MONO10)
- fg = ~((u8) (0xfff << info->var.green.length));
-
- switch (depth) {
- case 4:
- for (i = 0; i < logo->height; i++)
- for (j = 0; j < logo->width; src++) {
- *dst++ = *src >> 4;
- j++;
- if (j < logo->width) {
- *dst++ = *src & 0x0f;
- j++;
- }
- }
- break;
- case 1:
- for (i = 0; i < logo->height; i++) {
- for (j = 0; j < logo->width; src++) {
- d = *src ^ xor;
- for (k = 7; k >= 0 && j < logo->width; k--) {
- *dst++ = ((d >> k) & 1) ? fg : 0;
- j++;
- }
- }
- }
- break;
- }
-}
-
-/*
- * Three (3) kinds of logo maps exist. linux_logo_clut224 (>16 colors),
- * linux_logo_vga16 (16 colors) and linux_logo_mono (2 colors). Depending on
- * the visual format and color depth of the framebuffer, the DAC, the
- * pseudo_palette, and the logo data will be adjusted accordingly.
- *
- * Case 1 - linux_logo_clut224:
- * Color exceeds the number of console colors (16), thus we set the hardware DAC
- * using fb_set_cmap() appropriately. The "needs_cmapreset" flag will be set.
- *
- * For visuals that require color info from the pseudo_palette, we also construct
- * one for temporary use. The "needs_directpalette" or "needs_truepalette" flags
- * will be set.
- *
- * Case 2 - linux_logo_vga16:
- * The number of colors just matches the console colors, thus there is no need
- * to set the DAC or the pseudo_palette. However, the bitmap is packed, ie,
- * each byte contains color information for two pixels (upper and lower nibble).
- * To be consistent with fb_imageblit() usage, we therefore separate the two
- * nibbles into separate bytes. The "depth" flag will be set to 4.
- *
- * Case 3 - linux_logo_mono:
- * This is similar with Case 2. Each byte contains information for 8 pixels.
- * We isolate each bit and expand each into a byte. The "depth" flag will
- * be set to 1.
- */
-static struct logo_data {
- int depth;
- int needs_directpalette;
- int needs_truepalette;
- int needs_cmapreset;
- const struct linux_logo *logo;
-} fb_logo __read_mostly;
-
-static void fb_rotate_logo_ud(const u8 *in, u8 *out, u32 width, u32 height)
-{
- u32 size = width * height, i;
-
- out += size - 1;
-
- for (i = size; i--; )
- *out-- = *in++;
-}
-
-static void fb_rotate_logo_cw(const u8 *in, u8 *out, u32 width, u32 height)
-{
- int i, j, h = height - 1;
-
- for (i = 0; i < height; i++)
- for (j = 0; j < width; j++)
- out[height * j + h - i] = *in++;
-}
-
-static void fb_rotate_logo_ccw(const u8 *in, u8 *out, u32 width, u32 height)
-{
- int i, j, w = width - 1;
-
- for (i = 0; i < height; i++)
- for (j = 0; j < width; j++)
- out[height * (w - j) + i] = *in++;
-}
-
-static void fb_rotate_logo(struct fb_info *info, u8 *dst,
- struct fb_image *image, int rotate)
-{
- u32 tmp;
-
- if (rotate == FB_ROTATE_UD) {
- fb_rotate_logo_ud(image->data, dst, image->width,
- image->height);
- image->dx = info->var.xres - image->width - image->dx;
- image->dy = info->var.yres - image->height - image->dy;
- } else if (rotate == FB_ROTATE_CW) {
- fb_rotate_logo_cw(image->data, dst, image->width,
- image->height);
- swap(image->width, image->height);
- tmp = image->dy;
- image->dy = image->dx;
- image->dx = info->var.xres - image->width - tmp;
- } else if (rotate == FB_ROTATE_CCW) {
- fb_rotate_logo_ccw(image->data, dst, image->width,
- image->height);
- swap(image->width, image->height);
- tmp = image->dx;
- image->dx = image->dy;
- image->dy = info->var.yres - image->height - tmp;
- }
-
- image->data = dst;
-}
-
-static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
- int rotate, unsigned int num)
-{
- unsigned int x;
-
- if (image->width > info->var.xres || image->height > info->var.yres)
- return;
-
- if (rotate == FB_ROTATE_UR) {
- for (x = 0;
- x < num && image->dx + image->width <= info->var.xres;
- x++) {
- info->fbops->fb_imageblit(info, image);
- image->dx += image->width + 8;
- }
- } else if (rotate == FB_ROTATE_UD) {
- u32 dx = image->dx;
-
- for (x = 0; x < num && image->dx <= dx; x++) {
- info->fbops->fb_imageblit(info, image);
- image->dx -= image->width + 8;
- }
- } else if (rotate == FB_ROTATE_CW) {
- for (x = 0;
- x < num && image->dy + image->height <= info->var.yres;
- x++) {
- info->fbops->fb_imageblit(info, image);
- image->dy += image->height + 8;
- }
- } else if (rotate == FB_ROTATE_CCW) {
- u32 dy = image->dy;
-
- for (x = 0; x < num && image->dy <= dy; x++) {
- info->fbops->fb_imageblit(info, image);
- image->dy -= image->height + 8;
- }
- }
-}
-
-static int fb_show_logo_line(struct fb_info *info, int rotate,
- const struct linux_logo *logo, int y,
- unsigned int n)
-{
- u32 *palette = NULL, *saved_pseudo_palette = NULL;
- unsigned char *logo_new = NULL, *logo_rotate = NULL;
- struct fb_image image;
-
- /* Return if the frame buffer is not mapped or suspended */
- if (logo == NULL || info->state != FBINFO_STATE_RUNNING ||
- info->fbops->owner)
- return 0;
-
- image.depth = 8;
- image.data = logo->data;
-
- if (fb_logo.needs_cmapreset)
- fb_set_logocmap(info, logo);
-
- if (fb_logo.needs_truepalette ||
- fb_logo.needs_directpalette) {
- palette = kmalloc(256 * 4, GFP_KERNEL);
- if (palette == NULL)
- return 0;
-
- if (fb_logo.needs_truepalette)
- fb_set_logo_truepalette(info, logo, palette);
- else
- fb_set_logo_directpalette(info, logo, palette);
-
- saved_pseudo_palette = info->pseudo_palette;
- info->pseudo_palette = palette;
- }
-
- if (fb_logo.depth <= 4) {
- logo_new = kmalloc_array(logo->width, logo->height,
- GFP_KERNEL);
- if (logo_new == NULL) {
- kfree(palette);
- if (saved_pseudo_palette)
- info->pseudo_palette = saved_pseudo_palette;
- return 0;
- }
- image.data = logo_new;
- fb_set_logo(info, logo, logo_new, fb_logo.depth);
- }
-
- if (fb_center_logo) {
- int xres = info->var.xres;
- int yres = info->var.yres;
-
- if (rotate == FB_ROTATE_CW || rotate == FB_ROTATE_CCW) {
- xres = info->var.yres;
- yres = info->var.xres;
- }
-
- while (n && (n * (logo->width + 8) - 8 > xres))
- --n;
- image.dx = (xres - (n * (logo->width + 8) - 8)) / 2;
- image.dy = y ?: (yres - logo->height) / 2;
- } else {
- image.dx = 0;
- image.dy = y;
- }
-
- image.width = logo->width;
- image.height = logo->height;
-
- if (rotate) {
- logo_rotate = kmalloc_array(logo->width, logo->height,
- GFP_KERNEL);
- if (logo_rotate)
- fb_rotate_logo(info, logo_rotate, &image, rotate);
- }
-
- fb_do_show_logo(info, &image, rotate, n);
-
- kfree(palette);
- if (saved_pseudo_palette != NULL)
- info->pseudo_palette = saved_pseudo_palette;
- kfree(logo_new);
- kfree(logo_rotate);
- return image.dy + logo->height;
-}
-
-
-#ifdef CONFIG_FB_LOGO_EXTRA
-
-#define FB_LOGO_EX_NUM_MAX 10
-static struct logo_data_extra {
- const struct linux_logo *logo;
- unsigned int n;
-} fb_logo_ex[FB_LOGO_EX_NUM_MAX];
-static unsigned int fb_logo_ex_num;
-
-void fb_append_extra_logo(const struct linux_logo *logo, unsigned int n)
-{
- if (!n || fb_logo_ex_num == FB_LOGO_EX_NUM_MAX)
- return;
-
- fb_logo_ex[fb_logo_ex_num].logo = logo;
- fb_logo_ex[fb_logo_ex_num].n = n;
- fb_logo_ex_num++;
-}
-
-static int fb_prepare_extra_logos(struct fb_info *info, unsigned int height,
- unsigned int yres)
-{
- unsigned int i;
-
- /* FIXME: logo_ex supports only truecolor fb. */
- if (info->fix.visual != FB_VISUAL_TRUECOLOR)
- fb_logo_ex_num = 0;
-
- for (i = 0; i < fb_logo_ex_num; i++) {
- if (fb_logo_ex[i].logo->type != fb_logo.logo->type) {
- fb_logo_ex[i].logo = NULL;
- continue;
- }
- height += fb_logo_ex[i].logo->height;
- if (height > yres) {
- height -= fb_logo_ex[i].logo->height;
- fb_logo_ex_num = i;
- break;
- }
- }
- return height;
-}
-
-static int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
-{
- unsigned int i;
-
- for (i = 0; i < fb_logo_ex_num; i++)
- y = fb_show_logo_line(info, rotate,
- fb_logo_ex[i].logo, y, fb_logo_ex[i].n);
-
- return y;
-}
-
-#else /* !CONFIG_FB_LOGO_EXTRA */
-
-static inline int fb_prepare_extra_logos(struct fb_info *info,
- unsigned int height,
- unsigned int yres)
-{
- return height;
-}
-
-static inline int fb_show_extra_logos(struct fb_info *info, int y, int rotate)
-{
- return y;
-}
-
-#endif /* CONFIG_FB_LOGO_EXTRA */
-
-
-int fb_prepare_logo(struct fb_info *info, int rotate)
-{
- int depth = fb_get_color_depth(&info->var, &info->fix);
- unsigned int yres;
- int height;
-
- memset(&fb_logo, 0, sizeof(struct logo_data));
-
- if (info->flags & FBINFO_MISC_TILEBLITTING ||
- info->fbops->owner || !fb_logo_count)
- return 0;
-
- if (info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
- depth = info->var.blue.length;
- if (info->var.red.length < depth)
- depth = info->var.red.length;
- if (info->var.green.length < depth)
- depth = info->var.green.length;
- }
-
- if (info->fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR && depth > 4) {
- /* assume console colormap */
- depth = 4;
- }
-
- /* Return if no suitable logo was found */
- fb_logo.logo = fb_find_logo(depth);
-
- if (!fb_logo.logo) {
- return 0;
- }
-
- if (rotate == FB_ROTATE_UR || rotate == FB_ROTATE_UD)
- yres = info->var.yres;
- else
- yres = info->var.xres;
-
- if (fb_logo.logo->height > yres) {
- fb_logo.logo = NULL;
- return 0;
- }
-
- /* What depth we asked for might be different from what we get */
- if (fb_logo.logo->type == LINUX_LOGO_CLUT224)
- fb_logo.depth = 8;
- else if (fb_logo.logo->type == LINUX_LOGO_VGA16)
- fb_logo.depth = 4;
- else
- fb_logo.depth = 1;
-
-
- if (fb_logo.depth > 4 && depth > 4) {
- switch (info->fix.visual) {
- case FB_VISUAL_TRUECOLOR:
- fb_logo.needs_truepalette = 1;
- break;
- case FB_VISUAL_DIRECTCOLOR:
- fb_logo.needs_directpalette = 1;
- fb_logo.needs_cmapreset = 1;
- break;
- case FB_VISUAL_PSEUDOCOLOR:
- fb_logo.needs_cmapreset = 1;
- break;
- }
- }
-
- height = fb_logo.logo->height;
- if (fb_center_logo)
- height += (yres - fb_logo.logo->height) / 2;
-
- return fb_prepare_extra_logos(info, height, yres);
-}
-
-int fb_show_logo(struct fb_info *info, int rotate)
-{
- unsigned int count;
- int y;
-
- if (!fb_logo_count)
- return 0;
-
- count = fb_logo_count < 0 ? num_online_cpus() : fb_logo_count;
- y = fb_show_logo_line(info, rotate, fb_logo.logo, 0, count);
- y = fb_show_extra_logos(info, y, rotate);
-
- return y;
-}
-#else
-int fb_prepare_logo(struct fb_info *info, int rotate) { return 0; }
-int fb_show_logo(struct fb_info *info, int rotate) { return 0; }
-#endif /* CONFIG_LOGO */
-EXPORT_SYMBOL(fb_prepare_logo);
-EXPORT_SYMBOL(fb_show_logo);
-
int
fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var)
{
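For reference, the index arithmetic used by the logo-rotation helpers removed above is easy to check in isolation: a 180-degree flip simply reverses the byte order, and a 90-degree clockwise rotation of a width x height, 8 bpp image stores source pixel (row i, column j) at out[height * j + (height - 1 - i)], which is why fb_rotate_logo() swaps image->width and image->height afterwards. The following is a minimal standalone sketch of that arithmetic, not kernel code; the function names and the test harness are purely illustrative.

#include <stdio.h>

/*
 * Standalone illustration (not kernel code) of the index arithmetic used by
 * the fb_rotate_logo_ud()/fb_rotate_logo_cw() helpers removed above.  The
 * source is a width x height, 8 bpp image; the 90-degree rotated copy is
 * height x width.
 */

/* 180-degree rotation: the last source byte becomes the first output byte. */
static void rotate_ud(const unsigned char *in, unsigned char *out,
		      unsigned int width, unsigned int height)
{
	unsigned int size = width * height, i;

	for (i = 0; i < size; i++)
		out[size - 1 - i] = in[i];
}

/* 90-degree clockwise: source (row i, col j) -> output (row j, col height-1-i). */
static void rotate_cw(const unsigned char *in, unsigned char *out,
		      unsigned int width, unsigned int height)
{
	unsigned int i, j;

	for (i = 0; i < height; i++)
		for (j = 0; j < width; j++)
			out[height * j + (height - 1 - i)] = in[width * i + j];
}

int main(void)
{
	/* 3x2 test image: top row "abc", bottom row "def". */
	const unsigned char src[] = "abcdef";
	unsigned char cw[6], ud[6];
	unsigned int i, j;

	rotate_cw(src, cw, 3, 2);
	rotate_ud(src, ud, 3, 2);

	/* The clockwise copy is 2x3 and prints as "da", "eb", "fc". */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < 2; j++)
			putchar(cw[2 * i + j]);
		putchar('\n');
	}

	/* The 180-degree copy is still 3x2 and prints as "fed", "cba". */
	for (i = 0; i < 2; i++) {
		for (j = 0; j < 3; j++)
			putchar(ud[3 * i + j]);
		putchar('\n');
	}
	return 0;
}

Run on the 3x2 test pattern, the clockwise rows come out as "da", "eb" and "fc", matching what fb_rotate_logo_cw() produces for the same buffer before fb_do_show_logo() blits the copies 8 pixels apart.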
diff --git a/drivers/video/fbdev/mmp/fb/mmpfb.c b/drivers/video/fbdev/mmp/fb/mmpfb.c
index 42a87474bcea..2d9797c6fb3e 100644
--- a/drivers/video/fbdev/mmp/fb/mmpfb.c
+++ b/drivers/video/fbdev/mmp/fb/mmpfb.c
@@ -628,13 +628,6 @@ static int mmpfb_probe(struct platform_device *pdev)
 	dev_info(fbi->dev, "loaded to /dev/fb%d <%s>.\n",
 		info->node, info->fix.id);
 
-#ifdef CONFIG_LOGO
-	if (fbi->fb_start) {
-		fb_prepare_logo(info, 0);
-		fb_show_logo(info, 0);
-	}
-#endif
-
 	return 0;
 
 failed_clear_info:
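As a closing note on the placement logic removed from fbmem.c above: when fb_center_logo is set, fb_show_logo_line() lays out n copies of the logo separated by an 8-pixel gap, drops copies until the row fits the (possibly rotated) horizontal resolution, and centres the remainder. Below is a minimal sketch of that clamp-and-centre step, not kernel code; center_logo_row() is a hypothetical name used only for this illustration.

#include <stdio.h>

/*
 * Standalone sketch (not kernel code) of the clamp-and-centre step performed
 * by the removed fb_show_logo_line() when fb_center_logo is set.  Copies of
 * the logo are separated by an 8-pixel gap.
 */
static int center_logo_row(int xres, int logo_width, unsigned int *n)
{
	int row_width;

	/* Drop copies until *n logos plus their 8-pixel gaps fit into xres. */
	while (*n && (int)*n * (logo_width + 8) - 8 > xres)
		--*n;

	row_width = (int)*n * (logo_width + 8) - 8;
	return (xres - row_width) / 2;
}

int main(void)
{
	unsigned int n = 8;
	int dx = center_logo_row(1024, 80, &n);

	/* 8 * (80 + 8) - 8 = 696 <= 1024, so nothing is dropped and dx = (1024 - 696) / 2 = 164. */
	printf("n=%u dx=%d\n", n, dx);
	return 0;
}

With a 1024-pixel-wide mode and an 80-pixel logo, up to eleven copies fit, so a request for eight (for example one per online CPU, which is what fb_show_logo() asks for when fb_logo_count is negative) is left unclamped and the row starts at dx = 164.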