Diffstat (limited to 'drivers/gpu/drm/v3d')
-rw-r--r--  drivers/gpu/drm/v3d/Kconfig        |   1
-rw-r--r--  drivers/gpu/drm/v3d/v3d_bo.c       | 314
-rw-r--r--  drivers/gpu/drm/v3d/v3d_debugfs.c  |   8
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c      |  63
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.h      |  33
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c      | 104
-rw-r--r--  drivers/gpu/drm/v3d/v3d_irq.c      |  58
-rw-r--r--  drivers/gpu/drm/v3d/v3d_mmu.c      |  11
-rw-r--r--  drivers/gpu/drm/v3d/v3d_regs.h     |   2
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c    |  13
10 files changed, 218 insertions, 389 deletions
diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig
index 1552bf552c94..75a74c45f109 100644
--- a/drivers/gpu/drm/v3d/Kconfig
+++ b/drivers/gpu/drm/v3d/Kconfig
@@ -5,6 +5,7 @@ config DRM_V3D
depends on COMMON_CLK
depends on MMU
select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
help
Choose this option if you have a system that has a Broadcom
V3D 3.x or newer GPU, such as BCM7268.
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index a08766d39eab..c0219ebb4284 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -25,162 +25,6 @@
#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"
-/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps
- * it for DMA.
- */
-static int
-v3d_bo_get_pages(struct v3d_bo *bo)
-{
- struct drm_gem_object *obj = &bo->base;
- struct drm_device *dev = obj->dev;
- int npages = obj->size >> PAGE_SHIFT;
- int ret = 0;
-
- mutex_lock(&bo->lock);
- if (bo->pages_refcount++ != 0)
- goto unlock;
-
- if (!obj->import_attach) {
- bo->pages = drm_gem_get_pages(obj);
- if (IS_ERR(bo->pages)) {
- ret = PTR_ERR(bo->pages);
- goto unlock;
- }
-
- bo->sgt = drm_prime_pages_to_sg(bo->pages, npages);
- if (IS_ERR(bo->sgt)) {
- ret = PTR_ERR(bo->sgt);
- goto put_pages;
- }
-
- /* Map the pages for use by the GPU. */
- dma_map_sg(dev->dev, bo->sgt->sgl,
- bo->sgt->nents, DMA_BIDIRECTIONAL);
- } else {
- bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
- if (!bo->pages)
- goto put_pages;
-
- drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages,
- NULL, npages);
-
- /* Note that dma-bufs come in mapped. */
- }
-
- mutex_unlock(&bo->lock);
-
- return 0;
-
-put_pages:
- drm_gem_put_pages(obj, bo->pages, true, true);
- bo->pages = NULL;
-unlock:
- bo->pages_refcount--;
- mutex_unlock(&bo->lock);
- return ret;
-}
-
-static void
-v3d_bo_put_pages(struct v3d_bo *bo)
-{
- struct drm_gem_object *obj = &bo->base;
-
- mutex_lock(&bo->lock);
- if (--bo->pages_refcount == 0) {
- if (!obj->import_attach) {
- dma_unmap_sg(obj->dev->dev, bo->sgt->sgl,
- bo->sgt->nents, DMA_BIDIRECTIONAL);
- sg_free_table(bo->sgt);
- kfree(bo->sgt);
- drm_gem_put_pages(obj, bo->pages, true, true);
- } else {
- kfree(bo->pages);
- }
- }
- mutex_unlock(&bo->lock);
-}
-
-static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev,
- size_t unaligned_size)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct drm_gem_object *obj;
- struct v3d_bo *bo;
- size_t size = roundup(unaligned_size, PAGE_SIZE);
- int ret;
-
- if (size == 0)
- return ERR_PTR(-EINVAL);
-
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
- return ERR_PTR(-ENOMEM);
- obj = &bo->base;
-
- INIT_LIST_HEAD(&bo->vmas);
- INIT_LIST_HEAD(&bo->unref_head);
- mutex_init(&bo->lock);
-
- ret = drm_gem_object_init(dev, obj, size);
- if (ret)
- goto free_bo;
-
- spin_lock(&v3d->mm_lock);
- ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
- obj->size >> PAGE_SHIFT,
- GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
- spin_unlock(&v3d->mm_lock);
- if (ret)
- goto free_obj;
-
- return bo;
-
-free_obj:
- drm_gem_object_release(obj);
-free_bo:
- kfree(bo);
- return ERR_PTR(ret);
-}
-
-struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
- size_t unaligned_size)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct drm_gem_object *obj;
- struct v3d_bo *bo;
- int ret;
-
- bo = v3d_bo_create_struct(dev, unaligned_size);
- if (IS_ERR(bo))
- return bo;
- obj = &bo->base;
-
- bo->resv = &bo->_resv;
- reservation_object_init(bo->resv);
-
- ret = v3d_bo_get_pages(bo);
- if (ret)
- goto free_mm;
-
- v3d_mmu_insert_ptes(bo);
-
- mutex_lock(&v3d->bo_lock);
- v3d->bo_stats.num_allocated++;
- v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
- mutex_unlock(&v3d->bo_lock);
-
- return bo;
-
-free_mm:
- spin_lock(&v3d->mm_lock);
- drm_mm_remove_node(&bo->node);
- spin_unlock(&v3d->mm_lock);
-
- drm_gem_object_release(obj);
- kfree(bo);
- return ERR_PTR(ret);
-}
-
/* Called by the DRM core on the last userspace/kernel unreference of the
* BO.
*/
@@ -189,92 +33,116 @@ void v3d_free_object(struct drm_gem_object *obj)
struct v3d_dev *v3d = to_v3d_dev(obj->dev);
struct v3d_bo *bo = to_v3d_bo(obj);
+ v3d_mmu_remove_ptes(bo);
+
mutex_lock(&v3d->bo_lock);
v3d->bo_stats.num_allocated--;
v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
mutex_unlock(&v3d->bo_lock);
- reservation_object_fini(&bo->_resv);
-
- v3d_bo_put_pages(bo);
-
- if (obj->import_attach)
- drm_prime_gem_destroy(obj, bo->sgt);
-
- v3d_mmu_remove_ptes(bo);
spin_lock(&v3d->mm_lock);
drm_mm_remove_node(&bo->node);
spin_unlock(&v3d->mm_lock);
- mutex_destroy(&bo->lock);
+ /* GPU execution may have dirtied any pages in the BO. */
+ bo->base.pages_mark_dirty_on_put = true;
- drm_gem_object_release(obj);
- kfree(bo);
+ drm_gem_shmem_free_object(obj);
}
-struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj)
+static const struct drm_gem_object_funcs v3d_gem_funcs = {
+ .free = v3d_free_object,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/* gem_create_object function for allocating a BO struct and doing
+ * early setup.
+ */
+struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
{
- struct v3d_bo *bo = to_v3d_bo(obj);
+ struct v3d_bo *bo;
+ struct drm_gem_object *obj;
- return bo->resv;
-}
+ if (size == 0)
+ return NULL;
-static void
-v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
-{
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-}
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return NULL;
+ obj = &bo->base.base;
-vm_fault_t v3d_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct v3d_bo *bo = to_v3d_bo(obj);
- pfn_t pfn;
- pgoff_t pgoff;
+ obj->funcs = &v3d_gem_funcs;
- /* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
+ INIT_LIST_HEAD(&bo->unref_head);
- return vmf_insert_mixed(vma, vmf->address, pfn);
+ return &bo->base.base;
}
-int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
+static int
+v3d_bo_create_finish(struct drm_gem_object *obj)
{
+ struct v3d_dev *v3d = to_v3d_dev(obj->dev);
+ struct v3d_bo *bo = to_v3d_bo(obj);
+ struct sg_table *sgt;
int ret;
- ret = drm_gem_mmap(filp, vma);
+ /* So far we pin the BO in the MMU for its lifetime, so use
+ * shmem's helper for getting a lifetime sgt.
+ */
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base.base);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ spin_lock(&v3d->mm_lock);
+ /* Allocate the object's space in the GPU's page tables.
+ * Inserting PTEs will happen later, but the offset is for the
+ * lifetime of the BO.
+ */
+ ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
+ obj->size >> PAGE_SHIFT,
+ GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
+ spin_unlock(&v3d->mm_lock);
if (ret)
return ret;
- v3d_set_mmap_vma_flags(vma);
+ /* Track stats for /debug/dri/n/bo_stats. */
+ mutex_lock(&v3d->bo_lock);
+ v3d->bo_stats.num_allocated++;
+ v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
+ mutex_unlock(&v3d->bo_lock);
- return ret;
+ v3d_mmu_insert_ptes(bo);
+
+ return 0;
}
-int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
+ size_t unaligned_size)
{
+ struct drm_gem_shmem_object *shmem_obj;
+ struct v3d_bo *bo;
int ret;
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret < 0)
- return ret;
-
- v3d_set_mmap_vma_flags(vma);
+ shmem_obj = drm_gem_shmem_create(dev, unaligned_size);
+ if (!shmem_obj)
+ return NULL;
+ bo = to_v3d_bo(&shmem_obj->base);
- return 0;
-}
+ ret = v3d_bo_create_finish(&shmem_obj->base);
+ if (ret)
+ goto free_obj;
-struct sg_table *
-v3d_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct v3d_bo *bo = to_v3d_bo(obj);
- int npages = obj->size >> PAGE_SHIFT;
+ return bo;
- return drm_prime_pages_to_sg(bo->pages, npages);
+free_obj:
+ drm_gem_shmem_free_object(&shmem_obj->base);
+ return ERR_PTR(ret);
}
struct drm_gem_object *
@@ -283,20 +151,17 @@ v3d_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
struct drm_gem_object *obj;
- struct v3d_bo *bo;
-
- bo = v3d_bo_create_struct(dev, attach->dmabuf->size);
- if (IS_ERR(bo))
- return ERR_CAST(bo);
- obj = &bo->base;
-
- bo->resv = attach->dmabuf->resv;
+ int ret;
- bo->sgt = sgt;
- obj->import_attach = attach;
- v3d_bo_get_pages(bo);
+ obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj))
+ return obj;
- v3d_mmu_insert_ptes(bo);
+ ret = v3d_bo_create_finish(obj);
+ if (ret) {
+ drm_gem_shmem_free_object(obj);
+ return ERR_PTR(ret);
+ }
return obj;
}
@@ -319,8 +184,8 @@ int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
args->offset = bo->node.start << PAGE_SHIFT;
- ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
- drm_gem_object_put_unlocked(&bo->base);
+ ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
+ drm_gem_object_put_unlocked(&bo->base.base);
return ret;
}
@@ -330,7 +195,6 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
{
struct drm_v3d_mmap_bo *args = data;
struct drm_gem_object *gem_obj;
- int ret;
if (args->flags != 0) {
DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
@@ -343,12 +207,10 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
- ret = drm_gem_create_mmap_offset(gem_obj);
- if (ret == 0)
- args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
drm_gem_object_put_unlocked(gem_obj);
- return ret;
+ return 0;
}
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
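
For readers following the new allocation path above, here is a minimal, hedged sketch of the same pattern in isolation. The driver-specific names (my_bo, my_bo_create) are illustrative only; the drm_gem_shmem_* calls are the helpers this patch switches to. drm_gem_shmem_create() allocates the wrapper object (through the driver's .gem_create_object hook when one is set, as v3d_create_object() does above) and drm_gem_shmem_get_pages_sgt() pins the pages and caches a DMA-mapped sg_table in the shmem object for the BO's lifetime.

#include <drm/drm_gem_shmem_helper.h>

/* Illustrative wrapper BO; allocating it requires a .gem_create_object
 * hook so the helper creates the larger driver struct.
 */
struct my_bo {
	struct drm_gem_shmem_object base;
};

static struct my_bo *my_bo_create(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem;
	struct sg_table *sgt;

	/* Allocates the object (via dev->driver->gem_create_object if set),
	 * initializes the GEM object, and sets up the shmem backing.
	 */
	shmem = drm_gem_shmem_create(dev, size);
	if (IS_ERR(shmem))
		return ERR_CAST(shmem);

	/* Pins the shmem pages and maps them for DMA; the resulting
	 * sg_table is cached in shmem->sgt until the object is freed.
	 */
	sgt = drm_gem_shmem_get_pages_sgt(&shmem->base);
	if (IS_ERR(sgt)) {
		drm_gem_shmem_free_object(&shmem->base);
		return ERR_CAST(sgt);
	}

	return container_of(shmem, struct my_bo, base);
}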
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index eb2b2d2f8553..a24af2d2f574 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -187,6 +187,11 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
uint32_t cycles;
int core = 0;
int measure_ms = 1000;
+ int ret;
+
+ ret = pm_runtime_get_sync(v3d->dev);
+ if (ret < 0)
+ return ret;
if (v3d->ver >= 40) {
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
@@ -210,6 +215,9 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
cycles / (measure_ms * 1000),
(cycles / (measure_ms * 100)) % 10);
+ pm_runtime_mark_last_busy(v3d->dev);
+ pm_runtime_put_autosuspend(v3d->dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index f0afcec72c34..d600628bb5c1 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -7,9 +7,9 @@
* This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs.
* For V3D 2.x support, see the VC4 driver.
*
- * Currently only single-core rendering using the binner and renderer
- * is supported. The TFU (texture formatting unit) and V3D 4.x's CSD
- * (compute shader dispatch) are not yet supported.
+ * Currently only single-core rendering using the binner and renderer,
+ * along with TFU (texture formatting unit) rendering, is supported.
+ * V3D 4.x's CSD (compute shader dispatch) is not yet supported.
*/
#include <linux/clk.h>
@@ -19,6 +19,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
@@ -160,17 +161,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
kfree(v3d_priv);
}
-static const struct file_operations v3d_drm_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = v3d_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_SHMEM_FOPS(v3d_drm_fops);
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
* protection between clients. Note that render nodes would be
@@ -188,12 +179,6 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
};
-static const struct vm_operations_struct v3d_vm_ops = {
- .fault = v3d_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static struct drm_driver v3d_drm_driver = {
.driver_features = (DRIVER_GEM |
DRIVER_RENDER |
@@ -207,17 +192,11 @@ static struct drm_driver v3d_drm_driver = {
.debugfs_init = v3d_debugfs_init,
#endif
- .gem_free_object_unlocked = v3d_free_object,
- .gem_vm_ops = &v3d_vm_ops,
-
+ .gem_create_object = v3d_create_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_res_obj = v3d_prime_res_obj,
- .gem_prime_get_sg_table = v3d_prime_get_sg_table,
.gem_prime_import_sg_table = v3d_prime_import_sg_table,
- .gem_prime_mmap = v3d_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.ioctls = v3d_drm_ioctls,
.num_ioctls = ARRAY_SIZE(v3d_drm_ioctls),
@@ -265,10 +244,6 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
v3d->pdev = pdev;
drm = &v3d->drm;
- ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
- if (ret)
- goto dev_free;
-
ret = map_regs(v3d, &v3d->hub_regs, "hub");
if (ret)
goto dev_free;
@@ -283,6 +258,22 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
WARN_ON(v3d->cores > 1); /* multicore not yet implemented */
+ v3d->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(v3d->reset)) {
+ ret = PTR_ERR(v3d->reset);
+
+ if (ret == -EPROBE_DEFER)
+ goto dev_free;
+
+ v3d->reset = NULL;
+ ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
+ if (ret) {
+ dev_err(dev,
+ "Failed to get reset control or bridge regs\n");
+ goto dev_free;
+ }
+ }
+
if (v3d->ver < 41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
@@ -312,14 +303,18 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
if (ret)
goto dev_destroy;
- v3d_irq_init(v3d);
+ ret = v3d_irq_init(v3d);
+ if (ret)
+ goto gem_destroy;
ret = drm_dev_register(drm, 0);
if (ret)
- goto gem_destroy;
+ goto irq_disable;
return 0;
+irq_disable:
+ v3d_irq_disable(v3d);
gem_destroy:
v3d_gem_destroy(drm);
dev_destroy:
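
The hand-rolled file_operations and the v3d_mmap()/v3d_gem_fault() paths removed above are covered by DEFINE_DRM_GEM_SHMEM_FOPS(). As a rough, hedged expansion (paraphrased from the shmem helper of this era rather than copied from this tree), the macro boils down to:

/* Approximate expansion of DEFINE_DRM_GEM_SHMEM_FOPS(v3d_drm_fops);
 * the exact member list is the helper's, sketched here from memory.
 */
static const struct file_operations v3d_drm_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
	.mmap		= drm_gem_shmem_mmap,	/* replaces v3d_mmap()/v3d_gem_fault() */
};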
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index fdda3037f7af..7b0fe6240f7d 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */
-#include <linux/reservation.h>
#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>
#include "uapi/drm/v3d_drm.h"
@@ -34,6 +34,7 @@ struct v3d_dev {
* and revision.
*/
int ver;
+ bool single_irq_line;
struct device *dev;
struct platform_device *pdev;
@@ -42,6 +43,7 @@ struct v3d_dev {
void __iomem *bridge_regs;
void __iomem *gca_regs;
struct clk *clk;
+ struct reset_control *reset;
/* Virtual and DMA addresses of the single shared page table. */
volatile u32 *pt;
@@ -109,34 +111,15 @@ struct v3d_file_priv {
struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
};
-/* Tracks a mapping of a BO into a per-fd address space */
-struct v3d_vma {
- struct v3d_page_table *pt;
- struct list_head list; /* entry in v3d_bo.vmas */
-};
-
struct v3d_bo {
- struct drm_gem_object base;
-
- struct mutex lock;
+ struct drm_gem_shmem_object base;
struct drm_mm_node node;
- u32 pages_refcount;
- struct page **pages;
- struct sg_table *sgt;
- void *vaddr;
-
- struct list_head vmas; /* list of v3d_vma */
-
/* List entry for the BO's position in
* v3d_exec_info->unref_list
*/
struct list_head unref_head;
-
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
};
static inline struct v3d_bo *
@@ -270,6 +253,7 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
}
/* v3d_bo.c */
+struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
size_t size);
@@ -279,11 +263,6 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-vm_fault_t v3d_gem_fault(struct vm_fault *vmf);
-int v3d_mmap(struct file *filp, struct vm_area_struct *vma);
-struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj);
-int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct sg_table *v3d_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
@@ -310,7 +289,7 @@ void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
/* v3d_irq.c */
-void v3d_irq_init(struct v3d_dev *v3d);
+int v3d_irq_init(struct v3d_dev *v3d);
void v3d_irq_enable(struct v3d_dev *v3d);
void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);
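
The struct change above is why so many call sites in this series go from bo->base to bo->base.base: struct v3d_bo now embeds struct drm_gem_shmem_object, which in turn embeds the core struct drm_gem_object. A hedged sketch of the resulting layout and of a container_of-based downcast follows; the names are illustrative, and v3d's own to_v3d_bo() helper may be implemented differently.

#include <drm/drm_gem_shmem_helper.h>

struct example_bo {
	struct drm_gem_shmem_object base;	/* .base.base is the drm_gem_object */
	struct drm_mm_node node;		/* GPU virtual address range */
	struct list_head unref_head;
};

/* Illustrative downcast: to_drm_gem_shmem_obj() recovers the shmem object
 * from the core GEM object, and container_of() recovers the driver BO.
 */
static inline struct example_bo *to_example_bo(struct drm_gem_object *obj)
{
	return container_of(to_drm_gem_shmem_obj(obj),
			    struct example_bo, base);
}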
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 803f31467ec1..b84d89c7b3fb 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>
@@ -24,7 +25,8 @@ v3d_init_core(struct v3d_dev *v3d, int core)
* type. If you want the default behavior, you can still put
* "2" in the indirect texture state's output_type field.
*/
- V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
+ if (v3d->ver < 40)
+ V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
/* Whenever we flush the L2T cache, we always want to flush
* the whole thing.
@@ -69,7 +71,7 @@ v3d_idle_gca(struct v3d_dev *v3d)
}
static void
-v3d_reset_v3d(struct v3d_dev *v3d)
+v3d_reset_by_bridge(struct v3d_dev *v3d)
{
int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);
@@ -89,6 +91,15 @@ v3d_reset_v3d(struct v3d_dev *v3d)
V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
}
+}
+
+static void
+v3d_reset_v3d(struct v3d_dev *v3d)
+{
+ if (v3d->reset)
+ reset_control_reset(v3d->reset);
+ else
+ v3d_reset_by_bridge(v3d);
v3d_init_hw_state(v3d);
}
@@ -190,7 +201,8 @@ v3d_attach_object_fences(struct v3d_bo **bos, int bo_count,
for (i = 0; i < bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
- reservation_object_add_excl_fence(bos[i]->resv, fence);
+ reservation_object_add_excl_fence(bos[i]->base.base.resv,
+ fence);
}
}
@@ -199,12 +211,8 @@ v3d_unlock_bo_reservations(struct v3d_bo **bos,
int bo_count,
struct ww_acquire_ctx *acquire_ctx)
{
- int i;
-
- for (i = 0; i < bo_count; i++)
- ww_mutex_unlock(&bos[i]->resv->lock);
-
- ww_acquire_fini(acquire_ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)bos, bo_count,
+ acquire_ctx);
}
/* Takes the reservation lock on all the BOs being referenced, so that
@@ -219,58 +227,19 @@ v3d_lock_bo_reservations(struct v3d_bo **bos,
int bo_count,
struct ww_acquire_ctx *acquire_ctx)
{
- int contended_lock = -1;
int i, ret;
- ww_acquire_init(acquire_ctx, &reservation_ww_class);
-
-retry:
- if (contended_lock != -1) {
- struct v3d_bo *bo = bos[contended_lock];
-
- ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
- acquire_ctx);
- if (ret) {
- ww_acquire_done(acquire_ctx);
- return ret;
- }
- }
-
- for (i = 0; i < bo_count; i++) {
- if (i == contended_lock)
- continue;
-
- ret = ww_mutex_lock_interruptible(&bos[i]->resv->lock,
- acquire_ctx);
- if (ret) {
- int j;
-
- for (j = 0; j < i; j++)
- ww_mutex_unlock(&bos[j]->resv->lock);
-
- if (contended_lock != -1 && contended_lock >= i) {
- struct v3d_bo *bo = bos[contended_lock];
-
- ww_mutex_unlock(&bo->resv->lock);
- }
-
- if (ret == -EDEADLK) {
- contended_lock = i;
- goto retry;
- }
-
- ww_acquire_done(acquire_ctx);
- return ret;
- }
- }
-
- ww_acquire_done(acquire_ctx);
+ ret = drm_gem_lock_reservations((struct drm_gem_object **)bos,
+ bo_count, acquire_ctx);
+ if (ret)
+ return ret;
/* Reserve space for our shared (read-only) fence references,
* before we commit the CL to the hardware.
*/
for (i = 0; i < bo_count; i++) {
- ret = reservation_object_reserve_shared(bos[i]->resv, 1);
+ ret = reservation_object_reserve_shared(bos[i]->base.base.resv,
+ 1);
if (ret) {
v3d_unlock_bo_reservations(bos, bo_count,
acquire_ctx);
@@ -378,11 +347,11 @@ v3d_exec_cleanup(struct kref *ref)
dma_fence_put(exec->render_done_fence);
for (i = 0; i < exec->bo_count; i++)
- drm_gem_object_put_unlocked(&exec->bo[i]->base);
+ drm_gem_object_put_unlocked(&exec->bo[i]->base.base);
kvfree(exec->bo);
list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) {
- drm_gem_object_put_unlocked(&bo->base);
+ drm_gem_object_put_unlocked(&bo->base.base);
}
pm_runtime_mark_last_busy(v3d->dev);
@@ -409,7 +378,7 @@ v3d_tfu_job_cleanup(struct kref *ref)
for (i = 0; i < ARRAY_SIZE(job->bo); i++) {
if (job->bo[i])
- drm_gem_object_put_unlocked(&job->bo[i]->base);
+ drm_gem_object_put_unlocked(&job->bo[i]->base.base);
}
pm_runtime_mark_last_busy(v3d->dev);
@@ -429,8 +398,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
{
int ret;
struct drm_v3d_wait_bo *args = data;
- struct drm_gem_object *gem_obj;
- struct v3d_bo *bo;
ktime_t start = ktime_get();
u64 delta_ns;
unsigned long timeout_jiffies =
@@ -439,21 +406,8 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
- gem_obj = drm_gem_object_lookup(file_priv, args->handle);
- if (!gem_obj) {
- DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
- return -EINVAL;
- }
- bo = to_v3d_bo(gem_obj);
-
- ret = reservation_object_wait_timeout_rcu(bo->resv,
- true, true,
- timeout_jiffies);
-
- if (ret == 0)
- ret = -ETIME;
- else if (ret > 0)
- ret = 0;
+ ret = drm_gem_reservation_object_wait(file_priv, args->handle,
+ true, timeout_jiffies);
/* Decrement the user's timeout, in case we got interrupted
* such that the ioctl will be restarted.
@@ -468,8 +422,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
if (ret == -ETIME && args->timeout_ns)
ret = -EAGAIN;
- drm_gem_object_put_unlocked(gem_obj);
-
return ret;
}
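
The bulk of the deletion in v3d_lock_bo_reservations() is the hand-written ww_mutex acquire/backoff loop, which drm_gem_lock_reservations() now implements generically, including the -EDEADLK retry. A hedged sketch of the resulting pattern, using illustrative names and the same calls the patch switches to:

#include <drm/drm_gem.h>
#include <linux/reservation.h>

/* Illustrative sketch: lock every BO's reservation under one acquire
 * context, reserve a shared-fence slot per BO, and back out on error.
 */
static int sketch_lock_bos(struct drm_gem_object **objs, int count,
			   struct ww_acquire_ctx *acquire_ctx)
{
	int i, ret;

	/* Handles the wound/wait backoff and retry internally. */
	ret = drm_gem_lock_reservations(objs, count, acquire_ctx);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		ret = reservation_object_reserve_shared(objs[i]->resv, 1);
		if (ret) {
			drm_gem_unlock_reservations(objs, count, acquire_ctx);
			return ret;
		}
	}

	return 0;
}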
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 69338da70ddc..b4d6ae81186d 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -27,6 +27,9 @@
V3D_HUB_INT_MMU_CAP | \
V3D_HUB_INT_TFUC))
+static irqreturn_t
+v3d_hub_irq(int irq, void *arg);
+
static void
v3d_overflow_mem_work(struct work_struct *work)
{
@@ -34,12 +37,14 @@ v3d_overflow_mem_work(struct work_struct *work)
container_of(work, struct v3d_dev, overflow_mem_work);
struct drm_device *dev = &v3d->drm;
struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
+ struct drm_gem_object *obj;
unsigned long irqflags;
if (IS_ERR(bo)) {
DRM_ERROR("Couldn't allocate binner overflow mem\n");
return;
}
+ obj = &bo->base.base;
/* We lost a race, and our work task came in after the bin job
* completed and exited. This can happen because the HW
@@ -56,15 +61,15 @@ v3d_overflow_mem_work(struct work_struct *work)
goto out;
}
- drm_gem_object_get(&bo->base);
+ drm_gem_object_get(obj);
list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
spin_unlock_irqrestore(&v3d->job_lock, irqflags);
V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
- V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size);
+ V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);
out:
- drm_gem_object_put_unlocked(&bo->base);
+ drm_gem_object_put_unlocked(obj);
}
static irqreturn_t
@@ -112,6 +117,12 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_GMPV)
dev_err(v3d->dev, "GMP violation\n");
+ /* V3D 4.2 wires the hub and core IRQs together, so if we
+ * didn't see the common one then check hub for MMU IRQs.
+ */
+ if (v3d->single_irq_line && status == IRQ_NONE)
+ return v3d_hub_irq(irq, arg);
+
return status;
}
@@ -156,10 +167,10 @@ v3d_hub_irq(int irq, void *arg)
return status;
}
-void
+int
v3d_irq_init(struct v3d_dev *v3d)
{
- int ret, core;
+ int irq1, ret, core;
INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);
@@ -170,16 +181,37 @@ v3d_irq_init(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
- ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
- v3d_hub_irq, IRQF_SHARED,
- "v3d_hub", v3d);
- ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
- v3d_irq, IRQF_SHARED,
- "v3d_core0", v3d);
- if (ret)
- dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+ irq1 = platform_get_irq(v3d->pdev, 1);
+ if (irq1 == -EPROBE_DEFER)
+ return irq1;
+ if (irq1 > 0) {
+ ret = devm_request_irq(v3d->dev, irq1,
+ v3d_irq, IRQF_SHARED,
+ "v3d_core0", v3d);
+ if (ret)
+ goto fail;
+ ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+ v3d_hub_irq, IRQF_SHARED,
+ "v3d_hub", v3d);
+ if (ret)
+ goto fail;
+ } else {
+ v3d->single_irq_line = true;
+
+ ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+ v3d_irq, IRQF_SHARED,
+ "v3d", v3d);
+ if (ret)
+ goto fail;
+ }
v3d_irq_enable(v3d);
+ return 0;
+
+fail:
+ if (ret != -EPROBE_DEFER)
+ dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+ return ret;
}
void
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
index b00f97c31b70..7a21f1787ab1 100644
--- a/drivers/gpu/drm/v3d/v3d_mmu.c
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -83,13 +83,14 @@ int v3d_mmu_set_page_table(struct v3d_dev *v3d)
void v3d_mmu_insert_ptes(struct v3d_bo *bo)
{
- struct v3d_dev *v3d = to_v3d_dev(bo->base.dev);
+ struct drm_gem_shmem_object *shmem_obj = &bo->base;
+ struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
u32 page = bo->node.start;
u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
unsigned int count;
struct scatterlist *sgl;
- for_each_sg(bo->sgt->sgl, sgl, bo->sgt->nents, count) {
+ for_each_sg(shmem_obj->sgt->sgl, sgl, shmem_obj->sgt->nents, count) {
u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT;
u32 pte = page_prot | page_address;
u32 i;
@@ -102,7 +103,7 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
}
WARN_ON_ONCE(page - bo->node.start !=
- bo->base.size >> V3D_MMU_PAGE_SHIFT);
+ shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);
if (v3d_mmu_flush_all(v3d))
dev_err(v3d->dev, "MMU flush timeout\n");
@@ -110,8 +111,8 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
void v3d_mmu_remove_ptes(struct v3d_bo *bo)
{
- struct v3d_dev *v3d = to_v3d_dev(bo->base.dev);
- u32 npages = bo->base.size >> V3D_MMU_PAGE_SHIFT;
+ struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev);
+ u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT;
u32 page;
for (page = bo->node.start; page < bo->node.start + npages; page++)
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 6ccdee9d47bd..8e88af237610 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -216,6 +216,8 @@
# define V3D_IDENT2_BCG_INT BIT(28)
#define V3D_CTL_MISCCFG 0x00018
+# define V3D_CTL_MISCCFG_QRMAXCNT_MASK V3D_MASK(3, 1)
+# define V3D_CTL_MISCCFG_QRMAXCNT_SHIFT 1
# define V3D_MISCCFG_OVRTMUOUT BIT(0)
#define V3D_CTL_L2CACTL 0x00020
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 4704b2df3688..d0c68b7c8b41 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -231,20 +231,17 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
mutex_lock(&v3d->reset_lock);
/* block scheduler */
- for (q = 0; q < V3D_MAX_QUEUES; q++) {
- struct drm_gpu_scheduler *sched = &v3d->queue[q].sched;
-
- drm_sched_stop(sched);
+ for (q = 0; q < V3D_MAX_QUEUES; q++)
+ drm_sched_stop(&v3d->queue[q].sched);
- if(sched_job)
- drm_sched_increase_karma(sched_job);
- }
+ if (sched_job)
+ drm_sched_increase_karma(sched_job);
/* get the GPU back into the init state */
v3d_reset(v3d);
for (q = 0; q < V3D_MAX_QUEUES; q++)
- drm_sched_resubmit_jobs(sched_job->sched);
+ drm_sched_resubmit_jobs(&v3d->queue[q].sched);
/* Unblock schedulers and restart their jobs. */
for (q = 0; q < V3D_MAX_QUEUES; q++) {