Diffstat (limited to 'drivers/gpu/drm/v3d')
-rw-r--r--   drivers/gpu/drm/v3d/Makefile                      |   3
-rw-r--r--   drivers/gpu/drm/v3d/v3d_bo.c                      |  35
-rw-r--r--   drivers/gpu/drm/v3d/v3d_debugfs.c                 | 130
-rw-r--r--   drivers/gpu/drm/v3d/v3d_drv.c                     | 148
-rw-r--r--   drivers/gpu/drm/v3d/v3d_drv.h                     | 100
-rw-r--r--   drivers/gpu/drm/v3d/v3d_gem.c                     |  42
-rw-r--r--   drivers/gpu/drm/v3d/v3d_gemfs.c                   |  50
-rw-r--r--   drivers/gpu/drm/v3d/v3d_irq.c                     | 126
-rw-r--r--   drivers/gpu/drm/v3d/v3d_mmu.c                     |  93
-rw-r--r--   drivers/gpu/drm/v3d/v3d_perfmon.c                 | 308
-rw-r--r--   drivers/gpu/drm/v3d/v3d_performance_counters.h    |  33
-rw-r--r--   drivers/gpu/drm/v3d/v3d_regs.h                    |  55
-rw-r--r--   drivers/gpu/drm/v3d/v3d_sched.c                   | 419
-rw-r--r--   drivers/gpu/drm/v3d/v3d_submit.c                  | 309
-rw-r--r--   drivers/gpu/drm/v3d/v3d_sysfs.c                   |  13
15 files changed, 1335 insertions, 529 deletions
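
The fdinfo/GPU-stats rework in the v3d_drv.c and v3d_drv.h hunks below replaces the open-coded start_ns/enabled_ns/jobs_sent counters with a per-queue struct v3d_stats guarded by a seqcount, read via the new v3d_get_stats() helper. As a rough, hypothetical userspace sketch of that read-retry pattern (struct stats and stats_read() are made-up names, not driver API; the kernel's seqcount_t additionally handles memory ordering and lockdep on the writer side):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Illustrative analogue of the seqcount-protected stats read:
     * the writer bumps the sequence around each update, the reader
     * retries if it observed an odd or changed sequence. */
    struct stats {
            atomic_uint seq;        /* even: stable, odd: update in progress */
            uint64_t start_ns;
            uint64_t enabled_ns;
            uint64_t jobs_completed;
    };

    static void stats_read(const struct stats *s, uint64_t now,
                           uint64_t *active_runtime, uint64_t *jobs_completed)
    {
            unsigned int seq;

            do {
                    seq = atomic_load_explicit(&s->seq, memory_order_acquire);
                    *active_runtime = s->enabled_ns;
                    if (s->start_ns)
                            *active_runtime += now - s->start_ns;
                    *jobs_completed = s->jobs_completed;
            } while ((seq & 1) ||
                     atomic_load_explicit(&s->seq, memory_order_acquire) != seq);
    }

The point of the scheme, per the comment added in v3d_drv.h, is that the IRQ-path writer never blocks while fdinfo readers simply retry if an update raced with them.
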
diff --git a/drivers/gpu/drm/v3d/Makefile b/drivers/gpu/drm/v3d/Makefile index b7d673f1153b..fcf710926057 100644 --- a/drivers/gpu/drm/v3d/Makefile +++ b/drivers/gpu/drm/v3d/Makefile @@ -13,7 +13,8 @@ v3d-y := \ v3d_trace_points.o \ v3d_sched.o \ v3d_sysfs.o \ - v3d_submit.o + v3d_submit.o \ + v3d_gemfs.o v3d-$(CONFIG_DEBUG_FS) += v3d_debugfs.o diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index a07ede668cc1..bb7815599435 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -13,18 +13,26 @@ * Display engines requiring physically contiguous allocations should * look into Mesa's "renderonly" support (as used by the Mesa pl111 * driver) for an example of how to integrate with V3D. - * - * Long term, we should support evicting pages from the MMU when under - * memory pressure (thus the v3d_bo_get_pages() refcounting), but - * that's not a high priority since our systems tend to not have swap. */ #include <linux/dma-buf.h> #include <linux/pfn_t.h> +#include <linux/vmalloc.h> #include "v3d_drv.h" #include "uapi/drm/v3d_drm.h" +static enum drm_gem_object_status v3d_gem_status(struct drm_gem_object *obj) +{ + struct v3d_bo *bo = to_v3d_bo(obj); + enum drm_gem_object_status res = 0; + + if (bo->base.pages) + res |= DRM_GEM_OBJECT_RESIDENT; + + return res; +} + /* Called DRM core on the last userspace/kernel unreference of the * BO. */ @@ -62,6 +70,7 @@ static const struct drm_gem_object_funcs v3d_gem_funcs = { .vmap = drm_gem_shmem_object_vmap, .vunmap = drm_gem_shmem_object_vunmap, .mmap = drm_gem_shmem_object_mmap, + .status = v3d_gem_status, .vm_ops = &drm_gem_shmem_vm_ops, }; @@ -94,6 +103,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj) struct v3d_dev *v3d = to_v3d_dev(obj->dev); struct v3d_bo *bo = to_v3d_bo(obj); struct sg_table *sgt; + u64 align; int ret; /* So far we pin the BO in the MMU for its lifetime, so use @@ -103,6 +113,15 @@ v3d_bo_create_finish(struct drm_gem_object *obj) if (IS_ERR(sgt)) return PTR_ERR(sgt); + if (!v3d->gemfs) + align = SZ_4K; + else if (obj->size >= SZ_1M) + align = SZ_1M; + else if (obj->size >= SZ_64K) + align = SZ_64K; + else + align = SZ_4K; + spin_lock(&v3d->mm_lock); /* Allocate the object's space in the GPU's page tables. * Inserting PTEs will happen later, but the offset is for the @@ -110,7 +129,7 @@ v3d_bo_create_finish(struct drm_gem_object *obj) */ ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node, obj->size >> V3D_MMU_PAGE_SHIFT, - GMP_GRANULARITY >> V3D_MMU_PAGE_SHIFT, 0, 0); + align >> V3D_MMU_PAGE_SHIFT, 0, 0); spin_unlock(&v3d->mm_lock); if (ret) return ret; @@ -130,10 +149,12 @@ struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv, size_t unaligned_size) { struct drm_gem_shmem_object *shmem_obj; + struct v3d_dev *v3d = to_v3d_dev(dev); struct v3d_bo *bo; int ret; - shmem_obj = drm_gem_shmem_create(dev, unaligned_size); + shmem_obj = drm_gem_shmem_create_with_mnt(dev, unaligned_size, + v3d->gemfs); if (IS_ERR(shmem_obj)) return ERR_CAST(shmem_obj); bo = to_v3d_bo(&shmem_obj->base); @@ -278,7 +299,7 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data, else args->timeout_ns = 0; - /* Asked to wait beyond the jiffie/scheduler precision? */ + /* Asked to wait beyond the jiffy/scheduler precision? 
*/ if (ret == -ETIME && args->timeout_ns) ret = -EAGAIN; diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c index 19e3ee7ac897..7e789e181af0 100644 --- a/drivers/gpu/drm/v3d/v3d_debugfs.c +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -21,74 +21,74 @@ struct v3d_reg_def { }; static const struct v3d_reg_def v3d_hub_reg_defs[] = { - REGDEF(33, 42, V3D_HUB_AXICFG), - REGDEF(33, 71, V3D_HUB_UIFCFG), - REGDEF(33, 71, V3D_HUB_IDENT0), - REGDEF(33, 71, V3D_HUB_IDENT1), - REGDEF(33, 71, V3D_HUB_IDENT2), - REGDEF(33, 71, V3D_HUB_IDENT3), - REGDEF(33, 71, V3D_HUB_INT_STS), - REGDEF(33, 71, V3D_HUB_INT_MSK_STS), - - REGDEF(33, 71, V3D_MMU_CTL), - REGDEF(33, 71, V3D_MMU_VIO_ADDR), - REGDEF(33, 71, V3D_MMU_VIO_ID), - REGDEF(33, 71, V3D_MMU_DEBUG_INFO), - - REGDEF(71, 71, V3D_GMP_STATUS(71)), - REGDEF(71, 71, V3D_GMP_CFG(71)), - REGDEF(71, 71, V3D_GMP_VIO_ADDR(71)), + REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_HUB_AXICFG), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_UIFCFG), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT0), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT1), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT2), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT3), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_STS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_MSK_STS), + + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_CTL), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ADDR), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ID), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_DEBUG_INFO), + + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_STATUS(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_CFG(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_VIO_ADDR(71)), }; static const struct v3d_reg_def v3d_gca_reg_defs[] = { - REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN), - REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN_ACK), + REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN), + REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN_ACK), }; static const struct v3d_reg_def v3d_core_reg_defs[] = { - REGDEF(33, 71, V3D_CTL_IDENT0), - REGDEF(33, 71, V3D_CTL_IDENT1), - REGDEF(33, 71, V3D_CTL_IDENT2), - REGDEF(33, 71, V3D_CTL_MISCCFG), - REGDEF(33, 71, V3D_CTL_INT_STS), - REGDEF(33, 71, V3D_CTL_INT_MSK_STS), - REGDEF(33, 71, V3D_CLE_CT0CS), - REGDEF(33, 71, V3D_CLE_CT0CA), - REGDEF(33, 71, V3D_CLE_CT0EA), - REGDEF(33, 71, V3D_CLE_CT1CS), - REGDEF(33, 71, V3D_CLE_CT1CA), - REGDEF(33, 71, V3D_CLE_CT1EA), - - REGDEF(33, 71, V3D_PTB_BPCA), - REGDEF(33, 71, V3D_PTB_BPCS), - - REGDEF(33, 42, V3D_GMP_STATUS(33)), - REGDEF(33, 42, V3D_GMP_CFG(33)), - REGDEF(33, 42, V3D_GMP_VIO_ADDR(33)), - - REGDEF(33, 71, V3D_ERR_FDBGO), - REGDEF(33, 71, V3D_ERR_FDBGB), - REGDEF(33, 71, V3D_ERR_FDBGS), - REGDEF(33, 71, V3D_ERR_STAT), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT0), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT1), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT2), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_MISCCFG), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_STS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_MSK_STS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CA), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0EA), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CA), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1EA), + + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCA), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCS), + + REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_STATUS(33)), + REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_CFG(33)), + REGDEF(V3D_GEN_33, V3D_GEN_42, 
V3D_GMP_VIO_ADDR(33)), + + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGO), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGB), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_STAT), }; static const struct v3d_reg_def v3d_csd_reg_defs[] = { - REGDEF(41, 71, V3D_CSD_STATUS), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG0(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG1(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG2(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG3(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG4(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG5(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG6(41)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG3(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG4(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG5(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG6(71)), - REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG7), + REGDEF(V3D_GEN_41, V3D_GEN_71, V3D_CSD_STATUS), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG0(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG1(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG2(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG3(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG4(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG5(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG6(41)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG0(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG1(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG2(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG3(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG4(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG5(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG6(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG7), }; static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused) @@ -164,7 +164,7 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused) str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU)); seq_printf(m, "TFU: %s\n", str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU)); - if (v3d->ver <= 42) { + if (v3d->ver <= V3D_GEN_42) { seq_printf(m, "TSY: %s\n", str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY)); } @@ -196,11 +196,11 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused) seq_printf(m, " QPUs: %d\n", nslc * qups); seq_printf(m, " Semaphores: %d\n", V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM)); - if (v3d->ver <= 42) { + if (v3d->ver <= V3D_GEN_42) { seq_printf(m, " BCG int: %d\n", (ident2 & V3D_IDENT2_BCG_INT) != 0); } - if (v3d->ver < 40) { + if (v3d->ver < V3D_GEN_41) { seq_printf(m, " Override TMU: %d\n", (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0); } @@ -234,11 +234,11 @@ static int v3d_measure_clock(struct seq_file *m, void *unused) int core = 0; int measure_ms = 1000; - if (v3d->ver >= 40) { + if (v3d->ver >= V3D_GEN_41) { int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver); V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3, - V3D_SET_FIELD(cycle_count_reg, - V3D_PCTR_S0)); + V3D_SET_FIELD_VER(cycle_count_reg, + V3D_PCTR_S0, v3d->ver)); V3D_CORE_WRITE(core, V3D_V4_PCTR_0_CLR, 1); V3D_CORE_WRITE(core, V3D_V4_PCTR_0_EN, 1); } else { diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 3debf37e7d9b..5e997ae8bc9c 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -17,6 +17,7 @@ #include <linux/dma-mapping.h> #include <linux/io.h> #include 
<linux/module.h> +#include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/sched/clock.h> @@ -31,11 +32,17 @@ #define DRIVER_NAME "v3d" #define DRIVER_DESC "Broadcom V3D graphics" -#define DRIVER_DATE "20180419" #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 #define DRIVER_PATCHLEVEL 0 +/* Only expose the `super_pages` modparam if THP is enabled. */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +bool super_pages = true; +module_param_named(super_pages, super_pages, bool, 0400); +MODULE_PARM_DESC(super_pages, "Enable/Disable Super Pages support."); +#endif + static int v3d_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -86,7 +93,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data, args->value = 1; return 0; case DRM_V3D_PARAM_SUPPORTS_PERFMON: - args->value = (v3d->ver >= 40); + args->value = (v3d->ver >= V3D_GEN_41); return 0; case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT: args->value = 1; @@ -94,6 +101,12 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data, case DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE: args->value = 1; return 0; + case DRM_V3D_PARAM_MAX_PERF_COUNTERS: + args->value = v3d->perfmon_info.max_counters; + return 0; + case DRM_V3D_PARAM_SUPPORTS_SUPER_PAGES: + args->value = !!v3d->gemfs; + return 0; default: DRM_DEBUG("Unknown parameter %d\n", args->param); return -EINVAL; @@ -115,14 +128,13 @@ v3d_open(struct drm_device *dev, struct drm_file *file) v3d_priv->v3d = v3d; for (i = 0; i < V3D_MAX_QUEUES; i++) { - v3d_priv->enabled_ns[i] = 0; - v3d_priv->start_ns[i] = 0; - v3d_priv->jobs_sent[i] = 0; - sched = &v3d->queue[i].sched; drm_sched_entity_init(&v3d_priv->sched_entity[i], DRM_SCHED_PRIORITY_NORMAL, &sched, 1, NULL); + + memset(&v3d_priv->stats[i], 0, sizeof(v3d_priv->stats[i])); + seqcount_init(&v3d_priv->stats[i].lock); } v3d_perfmon_open_file(v3d_priv); @@ -144,6 +156,20 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file) kfree(v3d_priv); } +void v3d_get_stats(const struct v3d_stats *stats, u64 timestamp, + u64 *active_runtime, u64 *jobs_completed) +{ + unsigned int seq; + + do { + seq = read_seqcount_begin(&stats->lock); + *active_runtime = stats->enabled_ns; + if (stats->start_ns) + *active_runtime += timestamp - stats->start_ns; + *jobs_completed = stats->jobs_completed; + } while (read_seqcount_retry(&stats->lock, seq)); +} + static void v3d_show_fdinfo(struct drm_printer *p, struct drm_file *file) { struct v3d_file_priv *file_priv = file->driver_priv; @@ -151,21 +177,25 @@ static void v3d_show_fdinfo(struct drm_printer *p, struct drm_file *file) enum v3d_queue queue; for (queue = 0; queue < V3D_MAX_QUEUES; queue++) { + struct v3d_stats *stats = &file_priv->stats[queue]; + u64 active_runtime, jobs_completed; + + v3d_get_stats(stats, timestamp, &active_runtime, &jobs_completed); + /* Note that, in case of a GPU reset, the time spent during an * attempt of executing the job is not computed in the runtime. */ drm_printf(p, "drm-engine-%s: \t%llu ns\n", - v3d_queue_to_string(queue), - file_priv->start_ns[queue] ? file_priv->enabled_ns[queue] - + timestamp - file_priv->start_ns[queue] - : file_priv->enabled_ns[queue]); + v3d_queue_to_string(queue), active_runtime); /* Note that we only count jobs that completed. Therefore, jobs * that were resubmitted due to a GPU reset are not computed. 
*/ drm_printf(p, "v3d-jobs-%s: \t%llu jobs\n", - v3d_queue_to_string(queue), file_priv->jobs_sent[queue]); + v3d_queue_to_string(queue), jobs_completed); } + + drm_show_memory_stats(p, file); } static const struct file_operations v3d_drm_fops = { @@ -193,6 +223,8 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(V3D_PERFMON_DESTROY, v3d_perfmon_destroy_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_VALUES, v3d_perfmon_get_values_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(V3D_SUBMIT_CPU, v3d_submit_cpu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH), + DRM_IOCTL_DEF_DRV(V3D_PERFMON_GET_COUNTER, v3d_perfmon_get_counter_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(V3D_PERFMON_SET_GLOBAL, v3d_perfmon_set_global_ioctl, DRM_RENDER_ALLOW), }; static const struct drm_driver v3d_drm_driver = { @@ -217,21 +249,50 @@ static const struct drm_driver v3d_drm_driver = { .name = DRIVER_NAME, .desc = DRIVER_DESC, - .date = DRIVER_DATE, .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, }; static const struct of_device_id v3d_of_match[] = { - { .compatible = "brcm,2711-v3d" }, - { .compatible = "brcm,2712-v3d" }, - { .compatible = "brcm,7268-v3d" }, - { .compatible = "brcm,7278-v3d" }, + { .compatible = "brcm,2711-v3d", .data = (void *)V3D_GEN_42 }, + { .compatible = "brcm,2712-v3d", .data = (void *)V3D_GEN_71 }, + { .compatible = "brcm,7268-v3d", .data = (void *)V3D_GEN_33 }, + { .compatible = "brcm,7278-v3d", .data = (void *)V3D_GEN_41 }, {}, }; MODULE_DEVICE_TABLE(of, v3d_of_match); +static void +v3d_idle_sms(struct v3d_dev *v3d) +{ + if (v3d->ver < V3D_GEN_71) + return; + + V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_CLEAR_POWER_OFF); + + if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS), + V3D_SMS_STATE) == V3D_SMS_IDLE), 100)) { + DRM_ERROR("Failed to power up SMS\n"); + } + + v3d_reset_sms(v3d); +} + +static void +v3d_power_off_sms(struct v3d_dev *v3d) +{ + if (v3d->ver < V3D_GEN_71) + return; + + V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_POWER_OFF); + + if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS), + V3D_SMS_STATE) == V3D_SMS_POWER_OFF_STATE), 100)) { + DRM_ERROR("Failed to power off SMS\n"); + } +} + static int map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name) { @@ -244,9 +305,10 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct drm_device *drm; struct v3d_dev *v3d; + enum v3d_gen gen; int ret; u32 mmu_debug; - u32 ident1; + u32 ident1, ident3; u64 mask; v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm); @@ -257,6 +319,9 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, drm); + gen = (uintptr_t)of_device_get_match_data(dev); + v3d->ver = gen; + ret = map_regs(v3d, &v3d->hub_regs, "hub"); if (ret) return ret; @@ -265,47 +330,76 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) if (ret) return ret; + if (v3d->ver >= V3D_GEN_71) { + ret = map_regs(v3d, &v3d->sms_regs, "sms"); + if (ret) + return ret; + } + + v3d->clk = devm_clk_get_optional(dev, NULL); + if (IS_ERR(v3d->clk)) + return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n"); + + ret = clk_prepare_enable(v3d->clk); + if (ret) { + dev_err(&pdev->dev, "Couldn't enable the V3D clock\n"); + return ret; + } + + v3d_idle_sms(v3d); + mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO); mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)); ret = dma_set_mask_and_coherent(dev, mask); if (ret) - return ret; + 
goto clk_disable; v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH); ident1 = V3D_READ(V3D_HUB_IDENT1); v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV)); + /* Make sure that the V3D tech version retrieved from the HW is equal + * to the one advertised by the device tree. + */ + WARN_ON(v3d->ver != gen); + v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES); WARN_ON(v3d->cores > 1); /* multicore not yet implemented */ + ident3 = V3D_READ(V3D_HUB_IDENT3); + v3d->rev = V3D_GET_FIELD(ident3, V3D_HUB_IDENT3_IPREV); + + v3d_perfmon_init(v3d); + v3d->reset = devm_reset_control_get_exclusive(dev, NULL); if (IS_ERR(v3d->reset)) { ret = PTR_ERR(v3d->reset); if (ret == -EPROBE_DEFER) - return ret; + goto clk_disable; v3d->reset = NULL; ret = map_regs(v3d, &v3d->bridge_regs, "bridge"); if (ret) { dev_err(dev, "Failed to get reset control or bridge regs\n"); - return ret; + goto clk_disable; } } - if (v3d->ver < 41) { + if (v3d->ver < V3D_GEN_41) { ret = map_regs(v3d, &v3d->gca_regs, "gca"); if (ret) - return ret; + goto clk_disable; } v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); if (!v3d->mmu_scratch) { dev_err(dev, "Failed to allocate MMU scratch page\n"); - return -ENOMEM; + ret = -ENOMEM; + goto clk_disable; } ret = v3d_gem_init(drm); @@ -334,6 +428,8 @@ gem_destroy: v3d_gem_destroy(drm); dma_free: dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); +clk_disable: + clk_disable_unprepare(v3d->clk); return ret; } @@ -351,11 +447,15 @@ static void v3d_platform_drm_remove(struct platform_device *pdev) dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); + + v3d_power_off_sms(v3d); + + clk_disable_unprepare(v3d->clk); } static struct platform_driver v3d_platform_driver = { .probe = v3d_platform_drm_probe, - .remove_new = v3d_platform_drm_remove, + .remove = v3d_platform_drm_remove, .driver = { .name = "v3d", .of_match_table = v3d_of_match, diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 1950c723dde1..b51f0b648a08 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -11,15 +11,16 @@ #include <drm/drm_gem_shmem_helper.h> #include <drm/gpu_scheduler.h> +#include "v3d_performance_counters.h" + #include "uapi/drm/v3d_drm.h" struct clk; struct platform_device; struct reset_control; -#define GMP_GRANULARITY (128 * 1024) - #define V3D_MMU_PAGE_SHIFT 12 +#define V3D_PAGE_FACTOR (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) #define V3D_MAX_QUEUES (V3D_CPU + 1) @@ -36,15 +37,27 @@ static inline char *v3d_queue_to_string(enum v3d_queue queue) return "UNKNOWN"; } +struct v3d_stats { + u64 start_ns; + u64 enabled_ns; + u64 jobs_completed; + + /* + * This seqcount is used to protect the access to the GPU stats + * variables. It must be used as, while we are reading the stats, + * IRQs can happen and the stats can be updated. + */ + seqcount_t lock; +}; + struct v3d_queue_state { struct drm_gpu_scheduler sched; u64 fence_context; u64 emit_seqno; - u64 start_ns; - u64 enabled_ns; - u64 jobs_sent; + /* Stores the GPU stats for this queue in the global context. */ + struct v3d_stats stats; }; /* Performance monitor object. 
The perform lifetime is controlled by userspace @@ -81,19 +94,31 @@ struct v3d_perfmon { u64 values[] __counted_by(ncounters); }; +enum v3d_gen { + V3D_GEN_33 = 33, + V3D_GEN_41 = 41, + V3D_GEN_42 = 42, + V3D_GEN_71 = 71, +}; + struct v3d_dev { struct drm_device drm; - /* Short representation (e.g. 33, 41) of the V3D tech version - * and revision. - */ - int ver; + /* Short representation (e.g. 33, 41) of the V3D tech version */ + enum v3d_gen ver; + + /* Short representation (e.g. 5, 6) of the V3D tech revision */ + int rev; + bool single_irq_line; + struct v3d_perfmon_info perfmon_info; + void __iomem *hub_regs; void __iomem *core_regs[3]; void __iomem *bridge_regs; void __iomem *gca_regs; + void __iomem *sms_regs; struct clk *clk; struct reset_control *reset; @@ -119,13 +144,17 @@ struct v3d_dev { struct drm_mm mm; spinlock_t mm_lock; + /* + * tmpfs instance used for shmem backed objects + */ + struct vfsmount *gemfs; + struct work_struct overflow_mem_work; struct v3d_bin_job *bin_job; struct v3d_render_job *render_job; struct v3d_tfu_job *tfu_job; struct v3d_csd_job *csd_job; - struct v3d_cpu_job *cpu_job; struct v3d_queue_state queue[V3D_MAX_QUEUES]; @@ -161,6 +190,12 @@ struct v3d_dev { u32 num_allocated; u32 pages_allocated; } bo_stats; + + /* To support a performance analysis tool in user space, we require + * a single, globally configured performance monitor (perfmon) for + * all jobs. + */ + struct v3d_perfmon *global_perfmon; }; static inline struct v3d_dev * @@ -172,7 +207,7 @@ to_v3d_dev(struct drm_device *dev) static inline bool v3d_has_csd(struct v3d_dev *v3d) { - return v3d->ver >= 41; + return v3d->ver >= V3D_GEN_41; } #define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev) @@ -188,11 +223,8 @@ struct v3d_file_priv { struct drm_sched_entity sched_entity[V3D_MAX_QUEUES]; - u64 start_ns[V3D_MAX_QUEUES]; - - u64 enabled_ns[V3D_MAX_QUEUES]; - - u64 jobs_sent[V3D_MAX_QUEUES]; + /* Stores the GPU stats for a specific queue for this fd. 
*/ + struct v3d_stats stats[V3D_MAX_QUEUES]; }; struct v3d_bo { @@ -237,6 +269,15 @@ to_v3d_fence(struct dma_fence *fence) #define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset) #define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset) +#define V3D_SMS_IDLE 0x0 +#define V3D_SMS_ISOLATING_FOR_RESET 0xa +#define V3D_SMS_RESETTING 0xb +#define V3D_SMS_ISOLATING_FOR_POWER_OFF 0xc +#define V3D_SMS_POWER_OFF_STATE 0xd + +#define V3D_SMS_READ(offset) readl(v3d->sms_regs + (offset)) +#define V3D_SMS_WRITE(offset, val) writel(val, v3d->sms_regs + (offset)) + #define V3D_CORE_READ(core, offset) readl(v3d->core_regs[core] + offset) #define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset) @@ -335,13 +376,9 @@ struct v3d_timestamp_query { struct drm_syncobj *syncobj; }; -/* Number of perfmons required to handle all supported performance counters */ -#define V3D_MAX_PERFMONS DIV_ROUND_UP(V3D_PERFCNT_NUM, \ - DRM_V3D_MAX_PERF_COUNTERS) - struct v3d_performance_query { /* Performance monitor IDs for this query */ - u32 kperfmon_ids[V3D_MAX_PERFMONS]; + u32 *kperfmon_ids; /* Syncobj that indicates the query availability */ struct drm_syncobj *syncobj; @@ -508,6 +545,10 @@ struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev, /* v3d_debugfs.c */ void v3d_debugfs_init(struct drm_minor *minor); +/* v3d_drv.c */ +void v3d_get_stats(const struct v3d_stats *stats, u64 timestamp, + u64 *active_runtime, u64 *jobs_completed); + /* v3d_fence.c */ extern const struct dma_fence_ops v3d_fence_ops; struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue); @@ -515,10 +556,16 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue); /* v3d_gem.c */ int v3d_gem_init(struct drm_device *dev); void v3d_gem_destroy(struct drm_device *dev); +void v3d_reset_sms(struct v3d_dev *v3d); void v3d_reset(struct v3d_dev *v3d); void v3d_invalidate_caches(struct v3d_dev *v3d); void v3d_clean_caches(struct v3d_dev *v3d); +/* v3d_gemfs.c */ +extern bool super_pages; +void v3d_gemfs_init(struct v3d_dev *v3d); +void v3d_gemfs_fini(struct v3d_dev *v3d); + /* v3d_submit.c */ void v3d_job_cleanup(struct v3d_job *job); void v3d_job_put(struct v3d_job *job); @@ -538,15 +585,22 @@ void v3d_irq_disable(struct v3d_dev *v3d); void v3d_irq_reset(struct v3d_dev *v3d); /* v3d_mmu.c */ +int v3d_mmu_flush_all(struct v3d_dev *v3d); int v3d_mmu_set_page_table(struct v3d_dev *v3d); void v3d_mmu_insert_ptes(struct v3d_bo *bo); void v3d_mmu_remove_ptes(struct v3d_bo *bo); /* v3d_sched.c */ +void v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info, + unsigned int count); +void v3d_performance_query_info_free(struct v3d_performance_query_info *query_info, + unsigned int count); +void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue); int v3d_sched_init(struct v3d_dev *v3d); void v3d_sched_fini(struct v3d_dev *v3d); /* v3d_perfmon.c */ +void v3d_perfmon_init(struct v3d_dev *v3d); void v3d_perfmon_get(struct v3d_perfmon *perfmon); void v3d_perfmon_put(struct v3d_perfmon *perfmon); void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon); @@ -561,6 +615,10 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int v3d_perfmon_get_counter_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int v3d_perfmon_set_global_ioctl(struct drm_device *dev, 
void *data, + struct drm_file *file_priv); /* v3d_sysfs.c */ int v3d_sysfs_init(struct device *dev); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index afc565078c78..d7d16da78db3 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -25,7 +25,7 @@ v3d_init_core(struct v3d_dev *v3d, int core) * type. If you want the default behavior, you can still put * "2" in the indirect texture state's output_type field. */ - if (v3d->ver < 40) + if (v3d->ver < V3D_GEN_41) V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT); /* Whenever we flush the L2T cache, we always want to flush @@ -58,7 +58,7 @@ v3d_idle_axi(struct v3d_dev *v3d, int core) static void v3d_idle_gca(struct v3d_dev *v3d) { - if (v3d->ver >= 41) + if (v3d->ver >= V3D_GEN_41) return; V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN); @@ -105,6 +105,22 @@ v3d_reset_v3d(struct v3d_dev *v3d) } void +v3d_reset_sms(struct v3d_dev *v3d) +{ + if (v3d->ver < V3D_GEN_71) + return; + + V3D_SMS_WRITE(V3D_SMS_REE_CS, V3D_SET_FIELD(0x4, V3D_SMS_STATE)); + + if (wait_for(!(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS), + V3D_SMS_STATE) == V3D_SMS_ISOLATING_FOR_RESET) && + !(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS), + V3D_SMS_STATE) == V3D_SMS_RESETTING), 100)) { + DRM_ERROR("Failed to wait for SMS reset\n"); + } +} + +void v3d_reset(struct v3d_dev *v3d) { struct drm_device *dev = &v3d->drm; @@ -119,6 +135,7 @@ v3d_reset(struct v3d_dev *v3d) v3d_idle_axi(v3d, 0); v3d_idle_gca(v3d); + v3d_reset_sms(v3d); v3d_reset_v3d(v3d); v3d_mmu_set_page_table(v3d); @@ -132,13 +149,13 @@ v3d_reset(struct v3d_dev *v3d) static void v3d_flush_l3(struct v3d_dev *v3d) { - if (v3d->ver < 41) { + if (v3d->ver < V3D_GEN_41) { u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL); V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH); - if (v3d->ver < 33) { + if (v3d->ver < V3D_GEN_33) { V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH); } @@ -151,7 +168,7 @@ v3d_flush_l3(struct v3d_dev *v3d) static void v3d_invalidate_l2c(struct v3d_dev *v3d, int core) { - if (v3d->ver > 32) + if (v3d->ver >= V3D_GEN_33) return; V3D_CORE_WRITE(core, V3D_CTL_L2CACTL, @@ -247,10 +264,11 @@ v3d_gem_init(struct drm_device *dev) int ret, i; for (i = 0; i < V3D_MAX_QUEUES; i++) { - v3d->queue[i].fence_context = dma_fence_context_alloc(1); - v3d->queue[i].start_ns = 0; - v3d->queue[i].enabled_ns = 0; - v3d->queue[i].jobs_sent = 0; + struct v3d_queue_state *queue = &v3d->queue[i]; + + queue->fence_context = dma_fence_context_alloc(1); + memset(&queue->stats, 0, sizeof(queue->stats)); + seqcount_init(&queue->stats.lock); } spin_lock_init(&v3d->mm_lock); @@ -287,11 +305,14 @@ v3d_gem_init(struct drm_device *dev) v3d_init_hw_state(v3d); v3d_mmu_set_page_table(v3d); + v3d_gemfs_init(v3d); + ret = v3d_sched_init(v3d); if (ret) { drm_mm_takedown(&v3d->mm); - dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt, + dma_free_coherent(v3d->drm.dev, pt_size, (void *)v3d->pt, v3d->pt_paddr); + return ret; } return 0; @@ -303,6 +324,7 @@ v3d_gem_destroy(struct drm_device *dev) struct v3d_dev *v3d = to_v3d_dev(dev); v3d_sched_fini(v3d); + v3d_gemfs_fini(v3d); /* Waiting for jobs to finish would need to be done before * unregistering V3D. 
diff --git a/drivers/gpu/drm/v3d/v3d_gemfs.c b/drivers/gpu/drm/v3d/v3d_gemfs.c new file mode 100644 index 000000000000..4c5e18590a5c --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_gemfs.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (C) 2024 Raspberry Pi */ + +#include <linux/fs.h> +#include <linux/mount.h> + +#include "v3d_drv.h" + +void v3d_gemfs_init(struct v3d_dev *v3d) +{ + char huge_opt[] = "huge=within_size"; + struct file_system_type *type; + struct vfsmount *gemfs; + + /* + * By creating our own shmemfs mountpoint, we can pass in + * mount flags that better match our usecase. However, we + * only do so on platforms which benefit from it. + */ + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + goto err; + + /* The user doesn't want to enable Super Pages */ + if (!super_pages) + goto err; + + type = get_fs_type("tmpfs"); + if (!type) + goto err; + + gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt); + if (IS_ERR(gemfs)) + goto err; + + v3d->gemfs = gemfs; + drm_info(&v3d->drm, "Using Transparent Hugepages\n"); + + return; + +err: + v3d->gemfs = NULL; + drm_notice(&v3d->drm, + "Transparent Hugepage support is recommended for optimal performance on this platform!\n"); +} + +void v3d_gemfs_fini(struct v3d_dev *v3d) +{ + if (v3d->gemfs) + kern_unmount(v3d->gemfs); +} diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index ce6b2fb341d1..2cca5d3a26a2 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -70,6 +70,8 @@ v3d_overflow_mem_work(struct work_struct *work) list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list); spin_unlock_irqrestore(&v3d->job_lock, irqflags); + v3d_mmu_flush_all(v3d); + V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << V3D_MMU_PAGE_SHIFT); V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size); @@ -102,67 +104,46 @@ v3d_irq(int irq, void *arg) if (intsts & V3D_INT_FLDONE) { struct v3d_fence *fence = to_v3d_fence(v3d->bin_job->base.irq_fence); - struct v3d_file_priv *file = v3d->bin_job->base.file->driver_priv; - u64 runtime = local_clock() - file->start_ns[V3D_BIN]; - - file->jobs_sent[V3D_BIN]++; - v3d->queue[V3D_BIN].jobs_sent++; - - file->start_ns[V3D_BIN] = 0; - v3d->queue[V3D_BIN].start_ns = 0; - - file->enabled_ns[V3D_BIN] += runtime; - v3d->queue[V3D_BIN].enabled_ns += runtime; + v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN); trace_v3d_bcl_irq(&v3d->drm, fence->seqno); + + v3d->bin_job = NULL; dma_fence_signal(&fence->base); + status = IRQ_HANDLED; } if (intsts & V3D_INT_FRDONE) { struct v3d_fence *fence = to_v3d_fence(v3d->render_job->base.irq_fence); - struct v3d_file_priv *file = v3d->render_job->base.file->driver_priv; - u64 runtime = local_clock() - file->start_ns[V3D_RENDER]; - - file->jobs_sent[V3D_RENDER]++; - v3d->queue[V3D_RENDER].jobs_sent++; - - file->start_ns[V3D_RENDER] = 0; - v3d->queue[V3D_RENDER].start_ns = 0; - - file->enabled_ns[V3D_RENDER] += runtime; - v3d->queue[V3D_RENDER].enabled_ns += runtime; + v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER); trace_v3d_rcl_irq(&v3d->drm, fence->seqno); + + v3d->render_job = NULL; dma_fence_signal(&fence->base); + status = IRQ_HANDLED; } if (intsts & V3D_INT_CSDDONE(v3d->ver)) { struct v3d_fence *fence = to_v3d_fence(v3d->csd_job->base.irq_fence); - struct v3d_file_priv *file = v3d->csd_job->base.file->driver_priv; - u64 runtime = local_clock() - file->start_ns[V3D_CSD]; - - file->jobs_sent[V3D_CSD]++; - v3d->queue[V3D_CSD].jobs_sent++; - - file->start_ns[V3D_CSD] = 0; - 
v3d->queue[V3D_CSD].start_ns = 0; - - file->enabled_ns[V3D_CSD] += runtime; - v3d->queue[V3D_CSD].enabled_ns += runtime; + v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD); trace_v3d_csd_irq(&v3d->drm, fence->seqno); + + v3d->csd_job = NULL; dma_fence_signal(&fence->base); + status = IRQ_HANDLED; } /* We shouldn't be triggering these if we have GMP in * always-allowed mode. */ - if (v3d->ver < 71 && (intsts & V3D_INT_GMPV)) + if (v3d->ver < V3D_GEN_71 && (intsts & V3D_INT_GMPV)) dev_err(v3d->drm.dev, "GMP violation\n"); /* V3D 4.2 wires the hub and core IRQs together, so if we & @@ -189,20 +170,13 @@ v3d_hub_irq(int irq, void *arg) if (intsts & V3D_HUB_INT_TFUC) { struct v3d_fence *fence = to_v3d_fence(v3d->tfu_job->base.irq_fence); - struct v3d_file_priv *file = v3d->tfu_job->base.file->driver_priv; - u64 runtime = local_clock() - file->start_ns[V3D_TFU]; - - file->jobs_sent[V3D_TFU]++; - v3d->queue[V3D_TFU].jobs_sent++; - - file->start_ns[V3D_TFU] = 0; - v3d->queue[V3D_TFU].start_ns = 0; - - file->enabled_ns[V3D_TFU] += runtime; - v3d->queue[V3D_TFU].enabled_ns += runtime; + v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU); trace_v3d_tfu_irq(&v3d->drm, fence->seqno); + + v3d->tfu_job = NULL; dma_fence_signal(&fence->base); + status = IRQ_HANDLED; } @@ -212,27 +186,59 @@ v3d_hub_irq(int irq, void *arg) u32 axi_id = V3D_READ(V3D_MMU_VIO_ID); u64 vio_addr = ((u64)V3D_READ(V3D_MMU_VIO_ADDR) << (v3d->va_width - 32)); - static const char *const v3d41_axi_ids[] = { - "L2T", - "PTB", - "PSE", - "TLB", - "CLE", - "TFU", - "MMU", - "GMP", + static const struct { + u32 begin; + u32 end; + const char *client; + } v3d41_axi_ids[] = { + {0x00, 0x20, "L2T"}, + {0x20, 0x21, "PTB"}, + {0x40, 0x41, "PSE"}, + {0x60, 0x80, "TLB"}, + {0x80, 0x88, "CLE"}, + {0xA0, 0xA1, "TFU"}, + {0xC0, 0xE0, "MMU"}, + {0xE0, 0xE1, "GMP"}, + }, v3d71_axi_ids[] = { + {0x00, 0x30, "L2T"}, + {0x30, 0x38, "CLE"}, + {0x38, 0x39, "PTB"}, + {0x39, 0x3A, "PSE"}, + {0x3A, 0x3B, "CSD"}, + {0x40, 0x60, "TLB"}, + {0x60, 0x70, "MMU"}, + {0x7C, 0x7E, "TFU"}, + {0x7F, 0x80, "GMP"}, }; const char *client = "?"; V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL)); - if (v3d->ver >= 41) { - axi_id = axi_id >> 5; - if (axi_id < ARRAY_SIZE(v3d41_axi_ids)) - client = v3d41_axi_ids[axi_id]; + if (v3d->ver >= V3D_GEN_71) { + size_t i; + + axi_id = axi_id & 0x7F; + for (i = 0; i < ARRAY_SIZE(v3d71_axi_ids); i++) { + if (axi_id >= v3d71_axi_ids[i].begin && + axi_id < v3d71_axi_ids[i].end) { + client = v3d71_axi_ids[i].client; + break; + } + } + } else if (v3d->ver >= V3D_GEN_41) { + size_t i; + + axi_id = axi_id & 0xFF; + for (i = 0; i < ARRAY_SIZE(v3d41_axi_ids); i++) { + if (axi_id >= v3d41_axi_ids[i].begin && + axi_id < v3d41_axi_ids[i].end) { + client = v3d41_axi_ids[i].client; + break; + } + } } - dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n", + dev_err(v3d->drm.dev, "MMU error from client %s (0x%x) at 0x%llx%s%s%s\n", client, axi_id, (long long)vio_addr, ((intsts & V3D_HUB_INT_MMU_WRV) ? 
", write violation" : ""), @@ -243,7 +249,7 @@ v3d_hub_irq(int irq, void *arg) status = IRQ_HANDLED; } - if (v3d->ver >= 71 && (intsts & V3D_V7_HUB_INT_GMPV)) { + if (v3d->ver >= V3D_GEN_71 && (intsts & V3D_V7_HUB_INT_GMPV)) { dev_err(v3d->drm.dev, "GMP Violation\n"); status = IRQ_HANDLED; } diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c index 14f3af40d6f6..a25d25a8ae61 100644 --- a/drivers/gpu/drm/v3d/v3d_mmu.c +++ b/drivers/gpu/drm/v3d/v3d_mmu.c @@ -4,7 +4,7 @@ /** * DOC: Broadcom V3D MMU * - * The V3D 3.x hardware (compared to VC4) now includes an MMU. It has + * The V3D 3.x hardware (compared to VC4) now includes an MMU. It has * a single level of page tables for the V3D's 4GB address space to * map to AXI bus addresses, thus it could need up to 4MB of * physically contiguous memory to store the PTEs. @@ -15,49 +15,47 @@ * * To protect clients from each other, we should use the GMP to * quickly mask out (at 128kb granularity) what pages are available to - * each client. This is not yet implemented. + * each client. This is not yet implemented. */ #include "v3d_drv.h" #include "v3d_regs.h" -/* Note: All PTEs for the 1MB superpage must be filled with the - * superpage bit set. +/* Note: All PTEs for the 64KB bigpage or 1MB superpage must be filled + * with the bigpage/superpage bit set. */ #define V3D_PTE_SUPERPAGE BIT(31) +#define V3D_PTE_BIGPAGE BIT(30) #define V3D_PTE_WRITEABLE BIT(29) #define V3D_PTE_VALID BIT(28) -static int v3d_mmu_flush_all(struct v3d_dev *v3d) +static bool v3d_mmu_is_aligned(u32 page, u32 page_address, size_t alignment) { - int ret; - - /* Make sure that another flush isn't already running when we - * start this one. - */ - ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & - V3D_MMU_CTL_TLB_CLEARING), 100); - if (ret) - dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n"); + return IS_ALIGNED(page, alignment >> V3D_MMU_PAGE_SHIFT) && + IS_ALIGNED(page_address, alignment >> V3D_MMU_PAGE_SHIFT); +} - V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) | - V3D_MMU_CTL_TLB_CLEAR); +int v3d_mmu_flush_all(struct v3d_dev *v3d) +{ + int ret; - V3D_WRITE(V3D_MMUC_CONTROL, - V3D_MMUC_CONTROL_FLUSH | + V3D_WRITE(V3D_MMUC_CONTROL, V3D_MMUC_CONTROL_FLUSH | V3D_MMUC_CONTROL_ENABLE); - ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & - V3D_MMU_CTL_TLB_CLEARING), 100); + ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) & + V3D_MMUC_CONTROL_FLUSHING), 100); if (ret) { - dev_err(v3d->drm.dev, "TLB clear wait idle failed\n"); + dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n"); return ret; } - ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) & - V3D_MMUC_CONTROL_FLUSHING), 100); + V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) | + V3D_MMU_CTL_TLB_CLEAR); + + ret = wait_for(!(V3D_READ(V3D_MMU_CTL) & + V3D_MMU_CTL_TLB_CLEARING), 100); if (ret) - dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n"); + dev_err(v3d->drm.dev, "MMU TLB clear wait idle failed\n"); return ret; } @@ -87,19 +85,40 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo) struct drm_gem_shmem_object *shmem_obj = &bo->base; struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev); u32 page = bo->node.start; - u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; - struct sg_dma_page_iter dma_iter; - - for_each_sgtable_dma_page(shmem_obj->sgt, &dma_iter, 0) { - dma_addr_t dma_addr = sg_page_iter_dma_address(&dma_iter); - u32 page_address = dma_addr >> V3D_MMU_PAGE_SHIFT; - u32 pte = page_prot | page_address; - u32 i; - - BUG_ON(page_address + (PAGE_SIZE >> V3D_MMU_PAGE_SHIFT) >= - BIT(24)); - for (i = 0; i < PAGE_SIZE 
>> V3D_MMU_PAGE_SHIFT; i++) - v3d->pt[page++] = pte + i; + struct scatterlist *sgl; + unsigned int count; + + for_each_sgtable_dma_sg(shmem_obj->sgt, sgl, count) { + dma_addr_t dma_addr = sg_dma_address(sgl); + u32 pfn = dma_addr >> V3D_MMU_PAGE_SHIFT; + unsigned int len = sg_dma_len(sgl); + + while (len > 0) { + u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID; + u32 page_address = page_prot | pfn; + unsigned int i, page_size; + + BUG_ON(pfn + V3D_PAGE_FACTOR >= BIT(24)); + + if (len >= SZ_1M && + v3d_mmu_is_aligned(page, page_address, SZ_1M)) { + page_size = SZ_1M; + page_address |= V3D_PTE_SUPERPAGE; + } else if (len >= SZ_64K && + v3d_mmu_is_aligned(page, page_address, SZ_64K)) { + page_size = SZ_64K; + page_address |= V3D_PTE_BIGPAGE; + } else { + page_size = SZ_4K; + } + + for (i = 0; i < page_size >> V3D_MMU_PAGE_SHIFT; i++) { + v3d->pt[page++] = page_address + i; + pfn++; + } + + len -= page_size; + } } WARN_ON_ONCE(page - bo->node.start != diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c index e1be7368b87d..9a3fe5255874 100644 --- a/drivers/gpu/drm/v3d/v3d_perfmon.c +++ b/drivers/gpu/drm/v3d/v3d_perfmon.c @@ -9,6 +9,209 @@ #define V3D_PERFMONID_MIN 1 #define V3D_PERFMONID_MAX U32_MAX +static const struct v3d_perf_counter_desc v3d_v42_performance_counters[] = { + {"FEP", "FEP-valid-primitives-no-rendered-pixels", "[FEP] Valid primitives that result in no rendered pixels, for all rendered tiles"}, + {"FEP", "FEP-valid-primitives-rendered-pixels", "[FEP] Valid primitives for all rendered tiles (primitives may be counted in more than one tile)"}, + {"FEP", "FEP-clipped-quads", "[FEP] Early-Z/Near/Far clipped quads"}, + {"FEP", "FEP-valid-quads", "[FEP] Valid quads"}, + {"TLB", "TLB-quads-not-passing-stencil-test", "[TLB] Quads with no pixels passing the stencil test"}, + {"TLB", "TLB-quads-not-passing-z-and-stencil-test", "[TLB] Quads with no pixels passing the Z and stencil tests"}, + {"TLB", "TLB-quads-passing-z-and-stencil-test", "[TLB] Quads with any pixels passing the Z and stencil tests"}, + {"TLB", "TLB-quads-with-zero-coverage", "[TLB] Quads with all pixels having zero coverage"}, + {"TLB", "TLB-quads-with-non-zero-coverage", "[TLB] Quads with any pixels having non-zero coverage"}, + {"TLB", "TLB-quads-written-to-color-buffer", "[TLB] Quads with valid pixels written to colour buffer"}, + {"PTB", "PTB-primitives-discarded-outside-viewport", "[PTB] Primitives discarded by being outside the viewport"}, + {"PTB", "PTB-primitives-need-clipping", "[PTB] Primitives that need clipping"}, + {"PTB", "PTB-primitives-discarded-reversed", "[PTB] Primitives that are discarded because they are reversed"}, + {"QPU", "QPU-total-idle-clk-cycles", "[QPU] Total idle clock cycles for all QPUs"}, + {"QPU", "QPU-total-active-clk-cycles-vertex-coord-shading", "[QPU] Total active clock cycles for all QPUs doing vertex/coordinate/user shading (counts only when QPU is not stalled)"}, + {"QPU", "QPU-total-active-clk-cycles-fragment-shading", "[QPU] Total active clock cycles for all QPUs doing fragment shading (counts only when QPU is not stalled)"}, + {"QPU", "QPU-total-clk-cycles-executing-valid-instr", "[QPU] Total clock cycles for all QPUs executing valid instructions"}, + {"QPU", "QPU-total-clk-cycles-waiting-TMU", "[QPU] Total clock cycles for all QPUs stalled waiting for TMUs only (counter won't increment if QPU also stalling for another reason)"}, + {"QPU", "QPU-total-clk-cycles-waiting-scoreboard", "[QPU] Total clock cycles for all QPUs stalled waiting for 
Scoreboard only (counter won't increment if QPU also stalling for another reason)"}, + {"QPU", "QPU-total-clk-cycles-waiting-varyings", "[QPU] Total clock cycles for all QPUs stalled waiting for Varyings only (counter won't increment if QPU also stalling for another reason)"}, + {"QPU", "QPU-total-instr-cache-hit", "[QPU] Total instruction cache hits for all slices"}, + {"QPU", "QPU-total-instr-cache-miss", "[QPU] Total instruction cache misses for all slices"}, + {"QPU", "QPU-total-uniform-cache-hit", "[QPU] Total uniforms cache hits for all slices"}, + {"QPU", "QPU-total-uniform-cache-miss", "[QPU] Total uniforms cache misses for all slices"}, + {"TMU", "TMU-total-text-quads-access", "[TMU] Total texture cache accesses"}, + {"TMU", "TMU-total-text-cache-miss", "[TMU] Total texture cache misses (number of fetches from memory/L2cache)"}, + {"VPM", "VPM-total-clk-cycles-VDW-stalled", "[VPM] Total clock cycles VDW is stalled waiting for VPM access"}, + {"VPM", "VPM-total-clk-cycles-VCD-stalled", "[VPM] Total clock cycles VCD is stalled waiting for VPM access"}, + {"CLE", "CLE-bin-thread-active-cycles", "[CLE] Bin thread active cycles"}, + {"CLE", "CLE-render-thread-active-cycles", "[CLE] Render thread active cycles"}, + {"L2T", "L2T-total-cache-hit", "[L2T] Total Level 2 cache hits"}, + {"L2T", "L2T-total-cache-miss", "[L2T] Total Level 2 cache misses"}, + {"CORE", "cycle-count", "[CORE] Cycle counter"}, + {"QPU", "QPU-total-clk-cycles-waiting-vertex-coord-shading", "[QPU] Total stalled clock cycles for all QPUs doing vertex/coordinate/user shading"}, + {"QPU", "QPU-total-clk-cycles-waiting-fragment-shading", "[QPU] Total stalled clock cycles for all QPUs doing fragment shading"}, + {"PTB", "PTB-primitives-binned", "[PTB] Total primitives binned"}, + {"AXI", "AXI-writes-seen-watch-0", "[AXI] Writes seen by watch 0"}, + {"AXI", "AXI-reads-seen-watch-0", "[AXI] Reads seen by watch 0"}, + {"AXI", "AXI-writes-stalled-seen-watch-0", "[AXI] Write stalls seen by watch 0"}, + {"AXI", "AXI-reads-stalled-seen-watch-0", "[AXI] Read stalls seen by watch 0"}, + {"AXI", "AXI-write-bytes-seen-watch-0", "[AXI] Total bytes written seen by watch 0"}, + {"AXI", "AXI-read-bytes-seen-watch-0", "[AXI] Total bytes read seen by watch 0"}, + {"AXI", "AXI-writes-seen-watch-1", "[AXI] Writes seen by watch 1"}, + {"AXI", "AXI-reads-seen-watch-1", "[AXI] Reads seen by watch 1"}, + {"AXI", "AXI-writes-stalled-seen-watch-1", "[AXI] Write stalls seen by watch 1"}, + {"AXI", "AXI-reads-stalled-seen-watch-1", "[AXI] Read stalls seen by watch 1"}, + {"AXI", "AXI-write-bytes-seen-watch-1", "[AXI] Total bytes written seen by watch 1"}, + {"AXI", "AXI-read-bytes-seen-watch-1", "[AXI] Total bytes read seen by watch 1"}, + {"TLB", "TLB-partial-quads-written-to-color-buffer", "[TLB] Partial quads written to the colour buffer"}, + {"TMU", "TMU-total-config-access", "[TMU] Total config accesses"}, + {"L2T", "L2T-no-id-stalled", "[L2T] No ID stall"}, + {"L2T", "L2T-command-queue-stalled", "[L2T] Command queue full stall"}, + {"L2T", "L2T-TMU-writes", "[L2T] TMU write accesses"}, + {"TMU", "TMU-active-cycles", "[TMU] Active cycles"}, + {"TMU", "TMU-stalled-cycles", "[TMU] Stalled cycles"}, + {"CLE", "CLE-thread-active-cycles", "[CLE] Bin or render thread active cycles"}, + {"L2T", "L2T-TMU-reads", "[L2T] TMU read accesses"}, + {"L2T", "L2T-CLE-reads", "[L2T] CLE read accesses"}, + {"L2T", "L2T-VCD-reads", "[L2T] VCD read accesses"}, + {"L2T", "L2T-TMU-config-reads", "[L2T] TMU CFG read accesses"}, + {"L2T", "L2T-SLC0-reads", "[L2T] 
SLC0 read accesses"}, + {"L2T", "L2T-SLC1-reads", "[L2T] SLC1 read accesses"}, + {"L2T", "L2T-SLC2-reads", "[L2T] SLC2 read accesses"}, + {"L2T", "L2T-TMU-write-miss", "[L2T] TMU write misses"}, + {"L2T", "L2T-TMU-read-miss", "[L2T] TMU read misses"}, + {"L2T", "L2T-CLE-read-miss", "[L2T] CLE read misses"}, + {"L2T", "L2T-VCD-read-miss", "[L2T] VCD read misses"}, + {"L2T", "L2T-TMU-config-read-miss", "[L2T] TMU CFG read misses"}, + {"L2T", "L2T-SLC0-read-miss", "[L2T] SLC0 read misses"}, + {"L2T", "L2T-SLC1-read-miss", "[L2T] SLC1 read misses"}, + {"L2T", "L2T-SLC2-read-miss", "[L2T] SLC2 read misses"}, + {"CORE", "core-memory-writes", "[CORE] Total memory writes"}, + {"L2T", "L2T-memory-writes", "[L2T] Total memory writes"}, + {"PTB", "PTB-memory-writes", "[PTB] Total memory writes"}, + {"TLB", "TLB-memory-writes", "[TLB] Total memory writes"}, + {"CORE", "core-memory-reads", "[CORE] Total memory reads"}, + {"L2T", "L2T-memory-reads", "[L2T] Total memory reads"}, + {"PTB", "PTB-memory-reads", "[PTB] Total memory reads"}, + {"PSE", "PSE-memory-reads", "[PSE] Total memory reads"}, + {"TLB", "TLB-memory-reads", "[TLB] Total memory reads"}, + {"GMP", "GMP-memory-reads", "[GMP] Total memory reads"}, + {"PTB", "PTB-memory-words-writes", "[PTB] Total memory words written"}, + {"TLB", "TLB-memory-words-writes", "[TLB] Total memory words written"}, + {"PSE", "PSE-memory-words-reads", "[PSE] Total memory words read"}, + {"TLB", "TLB-memory-words-reads", "[TLB] Total memory words read"}, + {"TMU", "TMU-MRU-hits", "[TMU] Total MRU hits"}, + {"CORE", "compute-active-cycles", "[CORE] Compute active cycles"}, +}; + +static const struct v3d_perf_counter_desc v3d_v71_performance_counters[] = { + {"CORE", "cycle-count", "[CORE] Cycle counter"}, + {"CORE", "core-active", "[CORE] Bin/Render/Compute active cycles"}, + {"CLE", "CLE-bin-thread-active-cycles", "[CLE] Bin thread active cycles"}, + {"CLE", "CLE-render-thread-active-cycles", "[CLE] Render thread active cycles"}, + {"CORE", "compute-active-cycles", "[CORE] Compute active cycles"}, + {"FEP", "FEP-valid-primitives-no-rendered-pixels", "[FEP] Valid primitives that result in no rendered pixels, for all rendered tiles"}, + {"FEP", "FEP-valid-primitives-rendered-pixels", "[FEP] Valid primitives for all rendered tiles (primitives may be counted in more than one tile)"}, + {"FEP", "FEP-clipped-quads", "[FEP] Early-Z/Near/Far clipped quads"}, + {"FEP", "FEP-valid-quads", "[FEP] Valid quads"}, + {"TLB", "TLB-quads-not-passing-stencil-test", "[TLB] Quads with no pixels passing the stencil test"}, + {"TLB", "TLB-quads-not-passing-z-and-stencil-test", "[TLB] Quads with no pixels passing the Z and stencil tests"}, + {"TLB", "TLB-quads-passing-z-and-stencil-test", "[TLB] Quads with any pixels passing the Z and stencil tests"}, + {"TLB", "TLB-quads-written-to-color-buffer", "[TLB] Quads with valid pixels written to colour buffer"}, + {"TLB", "TLB-partial-quads-written-to-color-buffer", "[TLB] Partial quads written to the colour buffer"}, + {"PTB", "PTB-primitives-need-clipping", "[PTB] Primitives that need clipping"}, + {"PTB", "PTB-primitives-discarded-outside-viewport", "[PTB] Primitives discarded by being outside the viewport"}, + {"PTB", "PTB-primitives-binned", "[PTB] Total primitives binned"}, + {"PTB", "PTB-primitives-discarded-reversed", "[PTB] Primitives that are discarded because they are reversed"}, + {"QPU", "QPU-total-instr-cache-hit", "[QPU] Total instruction cache hits for all slices"}, + {"QPU", "QPU-total-instr-cache-miss", "[QPU] Total instruction 
cache misses for all slices"}, + {"QPU", "QPU-total-uniform-cache-hit", "[QPU] Total uniforms cache hits for all slices"}, + {"QPU", "QPU-total-uniform-cache-miss", "[QPU] Total uniforms cache misses for all slices"}, + {"TMU", "TMU-active-cycles", "[TMU] Active cycles"}, + {"TMU", "TMU-stalled-cycles", "[TMU] Stalled cycles"}, + {"TMU", "TMU-total-text-quads-access", "[TMU] Total texture cache accesses"}, + {"TMU", "TMU-cache-x4-active-cycles", "[TMU] Cache active cycles for x4 access"}, + {"TMU", "TMU-cache-x4-stalled-cycles", "[TMU] Cache stalled cycles for x4 access"}, + {"TMU", "TMU-total-text-quads-x4-access", "[TMU] Total texture cache x4 access"}, + {"L2T", "L2T-total-cache-hit", "[L2T] Total Level 2 cache hits"}, + {"L2T", "L2T-total-cache-miss", "[L2T] Total Level 2 cache misses"}, + {"L2T", "L2T-local", "[L2T] Local mode access"}, + {"L2T", "L2T-writeback", "[L2T] Writeback"}, + {"L2T", "L2T-zero", "[L2T] Zero"}, + {"L2T", "L2T-merge", "[L2T] Merge"}, + {"L2T", "L2T-fill", "[L2T] Fill"}, + {"L2T", "L2T-stalls-no-wid", "[L2T] Stalls because no WID available"}, + {"L2T", "L2T-stalls-no-rid", "[L2T] Stalls because no RID available"}, + {"L2T", "L2T-stalls-queue-full", "[L2T] Stalls because internal queue full"}, + {"L2T", "L2T-stalls-wrightback", "[L2T] Stalls because writeback in flight"}, + {"L2T", "L2T-stalls-mem", "[L2T] Stalls because AXI blocks read"}, + {"L2T", "L2T-stalls-fill", "[L2T] Stalls because fill pending for victim cache-line"}, + {"L2T", "L2T-hitq", "[L2T] Sent request via hit queue"}, + {"L2T", "L2T-hitq-full", "[L2T] Sent request via main queue because hit queue is full"}, + {"L2T", "L2T-stalls-read-data", "[L2T] Stalls because waiting for data from SDRAM"}, + {"L2T", "L2T-TMU-read-hits", "[L2T] TMU read hits"}, + {"L2T", "L2T-TMU-read-miss", "[L2T] TMU read misses"}, + {"L2T", "L2T-VCD-read-hits", "[L2T] VCD read hits"}, + {"L2T", "L2T-VCD-read-miss", "[L2T] VCD read misses"}, + {"L2T", "L2T-SLC-read-hits", "[L2T] SLC read hits (all slices)"}, + {"L2T", "L2T-SLC-read-miss", "[L2T] SLC read misses (all slices)"}, + {"AXI", "AXI-writes-seen-watch-0", "[AXI] Writes seen by watch 0"}, + {"AXI", "AXI-reads-seen-watch-0", "[AXI] Reads seen by watch 0"}, + {"AXI", "AXI-writes-stalled-seen-watch-0", "[AXI] Write stalls seen by watch 0"}, + {"AXI", "AXI-reads-stalled-seen-watch-0", "[AXI] Read stalls seen by watch 0"}, + {"AXI", "AXI-write-bytes-seen-watch-0", "[AXI] Total bytes written seen by watch 0"}, + {"AXI", "AXI-read-bytes-seen-watch-0", "[AXI] Total bytes read seen by watch 0"}, + {"AXI", "AXI-writes-seen-watch-1", "[AXI] Writes seen by watch 1"}, + {"AXI", "AXI-reads-seen-watch-1", "[AXI] Reads seen by watch 1"}, + {"AXI", "AXI-writes-stalled-seen-watch-1", "[AXI] Write stalls seen by watch 1"}, + {"AXI", "AXI-reads-stalled-seen-watch-1", "[AXI] Read stalls seen by watch 1"}, + {"AXI", "AXI-write-bytes-seen-watch-1", "[AXI] Total bytes written seen by watch 1"}, + {"AXI", "AXI-read-bytes-seen-watch-1", "[AXI] Total bytes read seen by watch 1"}, + {"CORE", "core-memory-writes", "[CORE] Total memory writes"}, + {"L2T", "L2T-memory-writes", "[L2T] Total memory writes"}, + {"PTB", "PTB-memory-writes", "[PTB] Total memory writes"}, + {"TLB", "TLB-memory-writes", "[TLB] Total memory writes"}, + {"CORE", "core-memory-reads", "[CORE] Total memory reads"}, + {"L2T", "L2T-memory-reads", "[L2T] Total memory reads"}, + {"PTB", "PTB-memory-reads", "[PTB] Total memory reads"}, + {"PSE", "PSE-memory-reads", "[PSE] Total memory reads"}, + {"TLB", "TLB-memory-reads", "[TLB] 
Total memory reads"}, + {"PTB", "PTB-memory-words-writes", "[PTB] Total memory words written"}, + {"TLB", "TLB-memory-words-writes", "[TLB] Total memory words written"}, + {"PSE", "PSE-memory-words-reads", "[PSE] Total memory words read"}, + {"TLB", "TLB-memory-words-reads", "[TLB] Total memory words read"}, + {"AXI", "AXI-read-trans", "[AXI] Read transaction count"}, + {"AXI", "AXI-write-trans", "[AXI] Write transaction count"}, + {"AXI", "AXI-read-wait-cycles", "[AXI] Read total wait cycles"}, + {"AXI", "AXI-write-wait-cycles", "[AXI] Write total wait cycles"}, + {"AXI", "AXI-max-outstanding-reads", "[AXI] Maximum outstanding read transactions"}, + {"AXI", "AXI-max-outstanding-writes", "[AXI] Maximum outstanding write transactions"}, + {"QPU", "QPU-wait-bubble", "[QPU] Pipeline bubble in qcycles due all threads waiting"}, + {"QPU", "QPU-ic-miss-bubble", "[QPU] Pipeline bubble in qcycles due instruction-cache miss"}, + {"QPU", "QPU-active", "[QPU] Executed shader instruction"}, + {"QPU", "QPU-total-active-clk-cycles-fragment-shading", "[QPU] Total active clock cycles for all QPUs doing fragment shading (counts only when QPU is not stalled)"}, + {"QPU", "QPU-stalls", "[QPU] Stalled qcycles executing shader instruction"}, + {"QPU", "QPU-total-clk-cycles-waiting-fragment-shading", "[QPU] Total stalled clock cycles for all QPUs doing fragment shading"}, + {"QPU", "QPU-stalls-TMU", "[QPU] Stalled qcycles waiting for TMU"}, + {"QPU", "QPU-stalls-TLB", "[QPU] Stalled qcycles waiting for TLB"}, + {"QPU", "QPU-stalls-VPM", "[QPU] Stalled qcycles waiting for VPM"}, + {"QPU", "QPU-stalls-uniforms", "[QPU] Stalled qcycles waiting for uniforms"}, + {"QPU", "QPU-stalls-SFU", "[QPU] Stalled qcycles waiting for SFU"}, + {"QPU", "QPU-stalls-other", "[QPU] Stalled qcycles waiting for any other reason (vary/W/Z)"}, +}; + +void v3d_perfmon_init(struct v3d_dev *v3d) +{ + const struct v3d_perf_counter_desc *counters = NULL; + unsigned int max = 0; + + if (v3d->ver >= V3D_GEN_71) { + counters = v3d_v71_performance_counters; + max = ARRAY_SIZE(v3d_v71_performance_counters); + } else if (v3d->ver >= V3D_GEN_42) { + counters = v3d_v42_performance_counters; + max = ARRAY_SIZE(v3d_v42_performance_counters); + } + + v3d->perfmon_info.max_counters = max; + v3d->perfmon_info.counters = counters; +} + void v3d_perfmon_get(struct v3d_perfmon *perfmon) { if (perfmon) @@ -37,23 +240,24 @@ void v3d_perfmon_start(struct v3d_dev *v3d, struct v3d_perfmon *perfmon) for (i = 0; i < ncounters; i++) { u32 source = i / 4; - u32 channel = V3D_SET_FIELD(perfmon->counters[i], V3D_PCTR_S0); + u32 channel = V3D_SET_FIELD_VER(perfmon->counters[i], V3D_PCTR_S0, + v3d->ver); i++; - channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0, - V3D_PCTR_S1); + channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0, + V3D_PCTR_S1, v3d->ver); i++; - channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0, - V3D_PCTR_S2); + channel |= V3D_SET_FIELD_VER(i < ncounters ? perfmon->counters[i] : 0, + V3D_PCTR_S2, v3d->ver); i++; - channel |= V3D_SET_FIELD(i < ncounters ? perfmon->counters[i] : 0, - V3D_PCTR_S3); + channel |= V3D_SET_FIELD_VER(i < ncounters ? 
perfmon->counters[i] : 0, + V3D_PCTR_S3, v3d->ver); V3D_CORE_WRITE(0, V3D_V4_PCTR_0_SRC_X(source), channel); } + V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask); V3D_CORE_WRITE(0, V3D_V4_PCTR_0_CLR, mask); V3D_CORE_WRITE(0, V3D_PCTR_0_OVERFLOW, mask); - V3D_CORE_WRITE(0, V3D_V4_PCTR_0_EN, mask); v3d->active_perfmon = perfmon; } @@ -103,6 +307,14 @@ void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv) static int v3d_perfmon_idr_del(int id, void *elem, void *data) { struct v3d_perfmon *perfmon = elem; + struct v3d_dev *v3d = (struct v3d_dev *)data; + + /* If the active perfmon is being destroyed, stop it first */ + if (perfmon == v3d->active_perfmon) + v3d_perfmon_stop(v3d, perfmon, false); + + /* If the global perfmon is being destroyed, set it to NULL */ + cmpxchg(&v3d->global_perfmon, perfmon, NULL); v3d_perfmon_put(perfmon); @@ -111,8 +323,10 @@ static int v3d_perfmon_idr_del(int id, void *elem, void *data) void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv) { + struct v3d_dev *v3d = v3d_priv->v3d; + mutex_lock(&v3d_priv->perfmon.lock); - idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL); + idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, v3d); idr_destroy(&v3d_priv->perfmon.idr); mutex_unlock(&v3d_priv->perfmon.lock); mutex_destroy(&v3d_priv->perfmon.lock); @@ -123,6 +337,7 @@ int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data, { struct v3d_file_priv *v3d_priv = file_priv->driver_priv; struct drm_v3d_perfmon_create *req = data; + struct v3d_dev *v3d = v3d_priv->v3d; struct v3d_perfmon *perfmon; unsigned int i; int ret; @@ -134,7 +349,7 @@ int v3d_perfmon_create_ioctl(struct drm_device *dev, void *data, /* Make sure all counters are valid. */ for (i = 0; i < req->ncounters; i++) { - if (req->counters[i] >= V3D_PERFCNT_NUM) + if (req->counters[i] >= v3d->perfmon_info.max_counters) return -EINVAL; } @@ -172,6 +387,7 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, { struct v3d_file_priv *v3d_priv = file_priv->driver_priv; struct drm_v3d_perfmon_destroy *req = data; + struct v3d_dev *v3d = v3d_priv->v3d; struct v3d_perfmon *perfmon; mutex_lock(&v3d_priv->perfmon.lock); @@ -181,6 +397,13 @@ int v3d_perfmon_destroy_ioctl(struct drm_device *dev, void *data, if (!perfmon) return -EINVAL; + /* If the active perfmon is being destroyed, stop it first */ + if (perfmon == v3d->active_perfmon) + v3d_perfmon_stop(v3d, perfmon, false); + + /* If the global perfmon is being destroyed, set it to NULL */ + cmpxchg(&v3d->global_perfmon, perfmon, NULL); + v3d_perfmon_put(perfmon); return 0; @@ -198,11 +421,7 @@ int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, if (req->pad != 0) return -EINVAL; - mutex_lock(&v3d_priv->perfmon.lock); - perfmon = idr_find(&v3d_priv->perfmon.idr, req->id); - v3d_perfmon_get(perfmon); - mutex_unlock(&v3d_priv->perfmon.lock); - + perfmon = v3d_perfmon_find(v3d_priv, req->id); if (!perfmon) return -EINVAL; @@ -216,3 +435,62 @@ int v3d_perfmon_get_values_ioctl(struct drm_device *dev, void *data, return ret; } + +int v3d_perfmon_get_counter_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_v3d_perfmon_get_counter *req = data; + struct v3d_dev *v3d = to_v3d_dev(dev); + const struct v3d_perf_counter_desc *counter; + + for (int i = 0; i < ARRAY_SIZE(req->reserved); i++) { + if (req->reserved[i] != 0) + return -EINVAL; + } + + if (!v3d->perfmon_info.max_counters) + return -EOPNOTSUPP; + + /* Make sure that the counter ID is valid */ + if (req->counter >= 
v3d->perfmon_info.max_counters) + return -EINVAL; + + counter = &v3d->perfmon_info.counters[req->counter]; + + strscpy(req->name, counter->name, sizeof(req->name)); + strscpy(req->category, counter->category, sizeof(req->category)); + strscpy(req->description, counter->description, sizeof(req->description)); + + return 0; +} + +int v3d_perfmon_set_global_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct v3d_file_priv *v3d_priv = file_priv->driver_priv; + struct drm_v3d_perfmon_set_global *req = data; + struct v3d_dev *v3d = to_v3d_dev(dev); + struct v3d_perfmon *perfmon; + + if (req->flags & ~DRM_V3D_PERFMON_CLEAR_GLOBAL) + return -EINVAL; + + perfmon = v3d_perfmon_find(v3d_priv, req->id); + if (!perfmon) + return -EINVAL; + + /* If the request is to clear the global performance monitor */ + if (req->flags & DRM_V3D_PERFMON_CLEAR_GLOBAL) { + if (!v3d->global_perfmon) + return -EINVAL; + + xchg(&v3d->global_perfmon, NULL); + + return 0; + } + + if (cmpxchg(&v3d->global_perfmon, NULL, perfmon)) + return -EBUSY; + + return 0; +} diff --git a/drivers/gpu/drm/v3d/v3d_performance_counters.h b/drivers/gpu/drm/v3d/v3d_performance_counters.h new file mode 100644 index 000000000000..2bc4cce0744a --- /dev/null +++ b/drivers/gpu/drm/v3d/v3d_performance_counters.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright (C) 2024 Raspberry Pi + */ + +#ifndef V3D_PERFORMANCE_COUNTERS_H +#define V3D_PERFORMANCE_COUNTERS_H + +/* Holds a description of a given performance counter. The index of + * performance counter is given by the array on `v3d_performance_counter.c`. + */ +struct v3d_perf_counter_desc { + /* Category of the counter */ + char category[32]; + + /* Name of the counter */ + char name[64]; + + /* Description of the counter */ + char description[256]; +}; + +struct v3d_perfmon_info { + /* Different revisions of V3D have different total number of + * performance counters. + */ + unsigned int max_counters; + + /* Array of counters valid for the platform. */ + const struct v3d_perf_counter_desc *counters; +}; + +#endif diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h index 1b1a62ad9585..c1870265eaee 100644 --- a/drivers/gpu/drm/v3d/v3d_regs.h +++ b/drivers/gpu/drm/v3d/v3d_regs.h @@ -15,6 +15,14 @@ fieldval & field##_MASK; \ }) +#define V3D_SET_FIELD_VER(value, field, ver) \ + ({ \ + typeof(ver) _ver = (ver); \ + u32 fieldval = (value) << field##_SHIFT(_ver); \ + WARN_ON((fieldval & ~field##_MASK(_ver)) != 0); \ + fieldval & field##_MASK(_ver); \ + }) + #define V3D_GET_FIELD(word, field) (((word) & field##_MASK) >> \ field##_SHIFT) @@ -354,18 +362,15 @@ #define V3D_V4_PCTR_0_SRC_28_31 0x0067c #define V3D_V4_PCTR_0_SRC_X(x) (V3D_V4_PCTR_0_SRC_0_3 + \ 4 * (x)) -# define V3D_PCTR_S0_MASK V3D_MASK(6, 0) -# define V3D_V7_PCTR_S0_MASK V3D_MASK(7, 0) -# define V3D_PCTR_S0_SHIFT 0 -# define V3D_PCTR_S1_MASK V3D_MASK(14, 8) -# define V3D_V7_PCTR_S1_MASK V3D_MASK(15, 8) -# define V3D_PCTR_S1_SHIFT 8 -# define V3D_PCTR_S2_MASK V3D_MASK(22, 16) -# define V3D_V7_PCTR_S2_MASK V3D_MASK(23, 16) -# define V3D_PCTR_S2_SHIFT 16 -# define V3D_PCTR_S3_MASK V3D_MASK(30, 24) -# define V3D_V7_PCTR_S3_MASK V3D_MASK(31, 24) -# define V3D_PCTR_S3_SHIFT 24 +# define V3D_PCTR_S0_MASK(ver) (((ver) >= 71) ? V3D_MASK(7, 0) : V3D_MASK(6, 0)) +# define V3D_PCTR_S0_SHIFT(ver) 0 +# define V3D_PCTR_S1_MASK(ver) (((ver) >= 71) ? V3D_MASK(15, 8) : V3D_MASK(14, 8)) +# define V3D_PCTR_S1_SHIFT(ver) 8 +# define V3D_PCTR_S2_MASK(ver) (((ver) >= 71) ? 
V3D_MASK(23, 16) : V3D_MASK(22, 16)) +# define V3D_PCTR_S2_SHIFT(ver) 16 +# define V3D_PCTR_S3_MASK(ver) (((ver) >= 71) ? V3D_MASK(31, 24) : V3D_MASK(30, 24)) +# define V3D_PCTR_S3_SHIFT(ver) 24 + #define V3D_PCTR_CYCLE_COUNT(ver) ((ver >= 71) ? 0 : 32) /* Output values of the counters. */ @@ -510,4 +515,30 @@ # define V3D_ERR_VPAERGS BIT(1) # define V3D_ERR_VPAEABB BIT(0) +#define V3D_SMS_REE_CS 0x00000 +#define V3D_SMS_TEE_CS 0x00400 +# define V3D_SMS_INTERRUPT BIT(31) +# define V3D_SMS_POWER_OFF BIT(30) +# define V3D_SMS_CLEAR_POWER_OFF BIT(29) +# define V3D_SMS_LOCK BIT(28) +# define V3D_SMS_CLEAR_LOCK BIT(27) +# define V3D_SMS_SVP_MODE_EXIT BIT(26) +# define V3D_SMS_CLEAR_SVP_MODE_EXIT BIT(25) +# define V3D_SMS_SVP_MODE_ENTER BIT(24) +# define V3D_SMS_CLEAR_SVP_MODE_ENTER BIT(23) +# define V3D_SMS_THEIR_MODE_EXIT BIT(22) +# define V3D_SMS_THEIR_MODE_ENTER BIT(21) +# define V3D_SMS_OUR_MODE_EXIT BIT(20) +# define V3D_SMS_CLEAR_OUR_MODE_EXIT BIT(19) +# define V3D_SMS_SEQ_PC_MASK V3D_MASK(16, 10) +# define V3D_SMS_SEQ_PC_SHIFT 10 +# define V3D_SMS_HUBCORE_STATUS_MASK V3D_MASK(9, 8) +# define V3D_SMS_HUBCORE_STATUS_SHIFT 8 +# define V3D_SMS_NEW_MODE_MASK V3D_MASK(7, 6) +# define V3D_SMS_NEW_MODE_SHIFT 6 +# define V3D_SMS_OLD_MODE_MASK V3D_MASK(5, 4) +# define V3D_SMS_OLD_MODE_SHIFT 4 +# define V3D_SMS_STATE_MASK V3D_MASK(3, 0) +# define V3D_SMS_STATE_SHIFT 0 + #endif /* V3D_REGS_H */ diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 54015ad765c7..35f131a46d07 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -5,16 +5,16 @@ * DOC: Broadcom V3D scheduling * * The shared DRM GPU scheduler is used to coordinate submitting jobs - * to the hardware. Each DRM fd (roughly a client process) gets its - * own scheduler entity, which will process jobs in order. The GPU - * scheduler will round-robin between clients to submit the next job. + * to the hardware. Each DRM fd (roughly a client process) gets its + * own scheduler entity, which will process jobs in order. The GPU + * scheduler will schedule the clients with a FIFO scheduling algorithm. * * For simplicity, and in order to keep latency low for interactive * jobs when bulk background jobs are queued up, we submit a new job * to the HW only when it has completed the last one, instead of - * filling up the CT[01]Q FIFOs with jobs. Similarly, we use - * drm_sched_job_add_dependency() to manage the dependency between bin and - * render, instead of having the clients submit jobs using the HW's + * filling up the CT[01]Q FIFOs with jobs. Similarly, we use + * `drm_sched_job_add_dependency()` to manage the dependency between bin + * and render, instead of having the clients submit jobs using the HW's * semaphores to interlock between them. 
*/ @@ -73,24 +73,46 @@ v3d_sched_job_free(struct drm_sched_job *sched_job) v3d_job_cleanup(job); } +void +v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info, + unsigned int count) +{ + if (query_info->queries) { + unsigned int i; + + for (i = 0; i < count; i++) + drm_syncobj_put(query_info->queries[i].syncobj); + + kvfree(query_info->queries); + } +} + +void +v3d_performance_query_info_free(struct v3d_performance_query_info *query_info, + unsigned int count) +{ + if (query_info->queries) { + unsigned int i; + + for (i = 0; i < count; i++) { + drm_syncobj_put(query_info->queries[i].syncobj); + kvfree(query_info->queries[i].kperfmon_ids); + } + + kvfree(query_info->queries); + } +} + static void v3d_cpu_job_free(struct drm_sched_job *sched_job) { struct v3d_cpu_job *job = to_cpu_job(sched_job); - struct v3d_timestamp_query_info *timestamp_query = &job->timestamp_query; - struct v3d_performance_query_info *performance_query = &job->performance_query; - if (timestamp_query->queries) { - for (int i = 0; i < timestamp_query->count; i++) - drm_syncobj_put(timestamp_query->queries[i].syncobj); - kvfree(timestamp_query->queries); - } + v3d_timestamp_query_info_free(&job->timestamp_query, + job->timestamp_query.count); - if (performance_query->queries) { - for (int i = 0; i < performance_query->count; i++) - drm_syncobj_put(performance_query->queries[i].syncobj); - kvfree(performance_query->queries); - } + v3d_performance_query_info_free(&job->performance_query, + job->performance_query.count); v3d_job_cleanup(&job->base); } @@ -98,24 +120,118 @@ v3d_cpu_job_free(struct drm_sched_job *sched_job) static void v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job) { - if (job->perfmon != v3d->active_perfmon) + struct v3d_perfmon *perfmon = v3d->global_perfmon; + + if (!perfmon) + perfmon = job->perfmon; + + if (perfmon == v3d->active_perfmon) + return; + + if (perfmon != v3d->active_perfmon) v3d_perfmon_stop(v3d, v3d->active_perfmon, true); - if (job->perfmon && v3d->active_perfmon != job->perfmon) - v3d_perfmon_start(v3d, job->perfmon); + if (perfmon && v3d->active_perfmon != perfmon) + v3d_perfmon_start(v3d, perfmon); +} + +static void +v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue) +{ + struct v3d_dev *v3d = job->v3d; + struct v3d_file_priv *file = job->file->driver_priv; + struct v3d_stats *global_stats = &v3d->queue[queue].stats; + struct v3d_stats *local_stats = &file->stats[queue]; + u64 now = local_clock(); + unsigned long flags; + + /* + * We only need to disable local interrupts to appease lockdep who + * otherwise would think v3d_job_start_stats vs v3d_stats_update has an + * unsafe in-irq vs no-irq-off usage problem. This is a false positive + * because all the locks are per queue and stats type, and all jobs are + * completely one at a time serialised. More specifically: + * + * 1. Locks for GPU queues are updated from interrupt handlers under a + * spin lock and started here with preemption disabled. + * + * 2. Locks for CPU queues are updated from the worker with preemption + * disabled and equally started here with preemption disabled. + * + * Therefore both are consistent. + * + * 3. Because next job can only be queued after the previous one has + * been signaled, and locks are per queue, there is also no scope for + * the start part to race with the update part. 
+ */ + if (IS_ENABLED(CONFIG_LOCKDEP)) + local_irq_save(flags); + else + preempt_disable(); + + write_seqcount_begin(&local_stats->lock); + local_stats->start_ns = now; + write_seqcount_end(&local_stats->lock); + + write_seqcount_begin(&global_stats->lock); + global_stats->start_ns = now; + write_seqcount_end(&global_stats->lock); + + if (IS_ENABLED(CONFIG_LOCKDEP)) + local_irq_restore(flags); + else + preempt_enable(); +} + +static void +v3d_stats_update(struct v3d_stats *stats, u64 now) +{ + write_seqcount_begin(&stats->lock); + stats->enabled_ns += now - stats->start_ns; + stats->jobs_completed++; + stats->start_ns = 0; + write_seqcount_end(&stats->lock); +} + +void +v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue) +{ + struct v3d_dev *v3d = job->v3d; + struct v3d_file_priv *file = job->file->driver_priv; + struct v3d_stats *global_stats = &v3d->queue[queue].stats; + struct v3d_stats *local_stats = &file->stats[queue]; + u64 now = local_clock(); + unsigned long flags; + + /* See comment in v3d_job_start_stats() */ + if (IS_ENABLED(CONFIG_LOCKDEP)) + local_irq_save(flags); + else + preempt_disable(); + + v3d_stats_update(local_stats, now); + v3d_stats_update(global_stats, now); + + if (IS_ENABLED(CONFIG_LOCKDEP)) + local_irq_restore(flags); + else + preempt_enable(); } static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job) { struct v3d_bin_job *job = to_bin_job(sched_job); struct v3d_dev *v3d = job->base.v3d; - struct v3d_file_priv *file = job->base.file->driver_priv; struct drm_device *dev = &v3d->drm; struct dma_fence *fence; unsigned long irqflags; - if (unlikely(job->base.base.s_fence->finished.error)) + if (unlikely(job->base.base.s_fence->finished.error)) { + spin_lock_irqsave(&v3d->job_lock, irqflags); + v3d->bin_job = NULL; + spin_unlock_irqrestore(&v3d->job_lock, irqflags); return NULL; + } /* Lock required around bin_job update vs * v3d_overflow_mem_work(). @@ -141,9 +257,7 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_cl(dev, false, to_v3d_fence(fence)->seqno, job->start, job->end); - file->start_ns[V3D_BIN] = local_clock(); - v3d->queue[V3D_BIN].start_ns = file->start_ns[V3D_BIN]; - + v3d_job_start_stats(&job->base, V3D_BIN); v3d_switch_perfmon(v3d, &job->base); /* Set the current and end address of the control list. 
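
The stats helpers introduced above only ever take the write side of the per-queue and per-file seqcounts, so a consumer (for example the fdinfo or sysfs code) can sample them without blocking the job-done path by using the usual seqcount read/retry loop. Below is a minimal sketch of such a reader; the in-tree v3d_get_stats() used later by v3d_sysfs.c is defined outside this excerpt, so the name and exact signature here are assumptions.

/* Sketch of a lockless reader pairing with the seqcount writers above.
 * Hypothetical helper; the real v3d_get_stats() definition is not part
 * of this diff excerpt.
 */
static void v3d_stats_sample(struct v3d_stats *stats, u64 now,
			     u64 *enabled_ns, u64 *jobs_completed)
{
	unsigned int seq;

	do {
		seq = read_seqcount_begin(&stats->lock);
		*enabled_ns = stats->enabled_ns;
		*jobs_completed = stats->jobs_completed;
		/* Fold in a job that is still running on this queue. */
		if (stats->start_ns)
			*enabled_ns += now - stats->start_ns;
	} while (read_seqcount_retry(&stats->lock, seq));
}
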
@@ -168,12 +282,13 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job) { struct v3d_render_job *job = to_render_job(sched_job); struct v3d_dev *v3d = job->base.v3d; - struct v3d_file_priv *file = job->base.file->driver_priv; struct drm_device *dev = &v3d->drm; struct dma_fence *fence; - if (unlikely(job->base.base.s_fence->finished.error)) + if (unlikely(job->base.base.s_fence->finished.error)) { + v3d->render_job = NULL; return NULL; + } v3d->render_job = job; @@ -196,9 +311,7 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_cl(dev, true, to_v3d_fence(fence)->seqno, job->start, job->end); - file->start_ns[V3D_RENDER] = local_clock(); - v3d->queue[V3D_RENDER].start_ns = file->start_ns[V3D_RENDER]; - + v3d_job_start_stats(&job->base, V3D_RENDER); v3d_switch_perfmon(v3d, &job->base); /* XXX: Set the QCFG */ @@ -217,34 +330,38 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job) { struct v3d_tfu_job *job = to_tfu_job(sched_job); struct v3d_dev *v3d = job->base.v3d; - struct v3d_file_priv *file = job->base.file->driver_priv; struct drm_device *dev = &v3d->drm; struct dma_fence *fence; + if (unlikely(job->base.base.s_fence->finished.error)) { + v3d->tfu_job = NULL; + return NULL; + } + + v3d->tfu_job = job; + fence = v3d_fence_create(v3d, V3D_TFU); if (IS_ERR(fence)) return NULL; - v3d->tfu_job = job; if (job->base.irq_fence) dma_fence_put(job->base.irq_fence); job->base.irq_fence = dma_fence_get(fence); trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno); - file->start_ns[V3D_TFU] = local_clock(); - v3d->queue[V3D_TFU].start_ns = file->start_ns[V3D_TFU]; + v3d_job_start_stats(&job->base, V3D_TFU); V3D_WRITE(V3D_TFU_IIA(v3d->ver), job->args.iia); V3D_WRITE(V3D_TFU_IIS(v3d->ver), job->args.iis); V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica); V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua); V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa); - if (v3d->ver >= 71) + if (v3d->ver >= V3D_GEN_71) V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc); V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios); V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]); - if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) { + if (v3d->ver >= V3D_GEN_71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) { V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]); V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]); V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]); @@ -260,10 +377,14 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) { struct v3d_csd_job *job = to_csd_job(sched_job); struct v3d_dev *v3d = job->base.v3d; - struct v3d_file_priv *file = job->base.file->driver_priv; struct drm_device *dev = &v3d->drm; struct dma_fence *fence; - int i, csd_cfg0_reg, csd_cfg_reg_count; + int i, csd_cfg0_reg; + + if (unlikely(job->base.base.s_fence->finished.error)) { + v3d->csd_job = NULL; + return NULL; + } v3d->csd_job = job; @@ -279,15 +400,21 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) trace_v3d_submit_csd(dev, to_v3d_fence(fence)->seqno); - file->start_ns[V3D_CSD] = local_clock(); - v3d->queue[V3D_CSD].start_ns = file->start_ns[V3D_CSD]; - + v3d_job_start_stats(&job->base, V3D_CSD); v3d_switch_perfmon(v3d, &job->base); csd_cfg0_reg = V3D_CSD_QUEUED_CFG0(v3d->ver); - csd_cfg_reg_count = v3d->ver < 71 ? 6 : 7; - for (i = 1; i <= csd_cfg_reg_count; i++) + for (i = 1; i <= 6; i++) V3D_CORE_WRITE(0, csd_cfg0_reg + 4 * i, job->args.cfg[i]); + + /* Although V3D 7.1 has an eighth configuration register, we are not + * using it. 
Therefore, make sure it remains unused. + * + * XXX: Set the CFG7 register + */ + if (v3d->ver >= V3D_GEN_71) + V3D_CORE_WRITE(0, V3D_V7_CSD_QUEUED_CFG7, 0); + /* CFG0 write kicks off the job. */ V3D_CORE_WRITE(0, csd_cfg0_reg, job->args.cfg[0]); @@ -301,7 +428,8 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job) struct v3d_bo *bo = to_v3d_bo(job->base.bo[0]); struct v3d_bo *indirect = to_v3d_bo(indirect_csd->indirect); struct drm_v3d_submit_csd *args = &indirect_csd->job->args; - u32 *wg_counts; + struct v3d_dev *v3d = job->base.v3d; + u32 num_batches, *wg_counts; v3d_get_bo_vaddr(bo); v3d_get_bo_vaddr(indirect); @@ -314,8 +442,17 @@ v3d_rewrite_csd_job_wg_counts_from_indirect(struct v3d_cpu_job *job) args->cfg[0] = wg_counts[0] << V3D_CSD_CFG012_WG_COUNT_SHIFT; args->cfg[1] = wg_counts[1] << V3D_CSD_CFG012_WG_COUNT_SHIFT; args->cfg[2] = wg_counts[2] << V3D_CSD_CFG012_WG_COUNT_SHIFT; - args->cfg[4] = DIV_ROUND_UP(indirect_csd->wg_size, 16) * - (wg_counts[0] * wg_counts[1] * wg_counts[2]) - 1; + + num_batches = DIV_ROUND_UP(indirect_csd->wg_size, 16) * + (wg_counts[0] * wg_counts[1] * wg_counts[2]); + + /* V3D 7.1.6 and later don't subtract 1 from the number of batches */ + if (v3d->ver < 71 || (v3d->ver == 71 && v3d->rev < 6)) + args->cfg[4] = num_batches - 1; + else + args->cfg[4] = num_batches; + + WARN_ON(args->cfg[4] == ~0); for (int i = 0; i < 3; i++) { /* 0xffffffff indicates that the uniform rewrite is not needed */ @@ -369,18 +506,23 @@ v3d_reset_timestamp_queries(struct v3d_cpu_job *job) v3d_put_bo_vaddr(bo); } -static void -write_to_buffer(void *dst, u32 idx, bool do_64bit, u64 value) +static void write_to_buffer_32(u32 *dst, unsigned int idx, u32 value) { - if (do_64bit) { - u64 *dst64 = (u64 *)dst; + dst[idx] = value; +} - dst64[idx] = value; - } else { - u32 *dst32 = (u32 *)dst; +static void write_to_buffer_64(u64 *dst, unsigned int idx, u64 value) +{ + dst[idx] = value; +} - dst32[idx] = (u32)value; - } +static void +write_to_buffer(void *dst, unsigned int idx, bool do_64bit, u64 value) +{ + if (do_64bit) + write_to_buffer_64(dst, idx, value); + else + write_to_buffer_32(dst, idx, value); } static void @@ -453,18 +595,24 @@ v3d_reset_performance_queries(struct v3d_cpu_job *job) } static void -v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 query) +v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, + unsigned int query) { - struct v3d_performance_query_info *performance_query = &job->performance_query; - struct v3d_copy_query_results_info *copy = &job->copy; + struct v3d_performance_query_info *performance_query = + &job->performance_query; struct v3d_file_priv *v3d_priv = job->base.file->driver_priv; + struct v3d_performance_query *perf_query = + &performance_query->queries[query]; struct v3d_dev *v3d = job->base.v3d; - struct v3d_perfmon *perfmon; - u64 counter_values[V3D_PERFCNT_NUM]; + unsigned int i, j, offset; + + for (i = 0, offset = 0; + i < performance_query->nperfmons; + i++, offset += DRM_V3D_MAX_PERF_COUNTERS) { + struct v3d_perfmon *perfmon; - for (int i = 0; i < performance_query->nperfmons; i++) { perfmon = v3d_perfmon_find(v3d_priv, - performance_query->queries[query].kperfmon_ids[i]); + perf_query->kperfmon_ids[i]); if (!perfmon) { DRM_DEBUG("Failed to find perfmon."); continue; @@ -472,14 +620,18 @@ v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, u32 quer v3d_perfmon_stop(v3d, perfmon, true); - memcpy(&counter_values[i * DRM_V3D_MAX_PERF_COUNTERS], perfmon->values, 
- perfmon->ncounters * sizeof(u64)); + if (job->copy.do_64bit) { + for (j = 0; j < perfmon->ncounters; j++) + write_to_buffer_64(data, offset + j, + perfmon->values[j]); + } else { + for (j = 0; j < perfmon->ncounters; j++) + write_to_buffer_32(data, offset + j, + perfmon->values[j]); + } v3d_perfmon_put(perfmon); } - - for (int i = 0; i < performance_query->ncounters; i++) - write_to_buffer(data, i, copy->do_64bit, counter_values[i]); } static void @@ -530,35 +682,19 @@ v3d_cpu_job_run(struct drm_sched_job *sched_job) { struct v3d_cpu_job *job = to_cpu_job(sched_job); struct v3d_dev *v3d = job->base.v3d; - struct v3d_file_priv *file = job->base.file->driver_priv; - u64 runtime; - - v3d->cpu_job = job; if (job->job_type >= ARRAY_SIZE(cpu_job_function)) { DRM_DEBUG_DRIVER("Unknown CPU job: %d\n", job->job_type); return NULL; } - file->start_ns[V3D_CPU] = local_clock(); - v3d->queue[V3D_CPU].start_ns = file->start_ns[V3D_CPU]; - + v3d_job_start_stats(&job->base, V3D_CPU); trace_v3d_cpu_job_begin(&v3d->drm, job->job_type); cpu_job_function[job->job_type](job); trace_v3d_cpu_job_end(&v3d->drm, job->job_type); - - runtime = local_clock() - file->start_ns[V3D_CPU]; - - file->enabled_ns[V3D_CPU] += runtime; - v3d->queue[V3D_CPU].enabled_ns += runtime; - - file->jobs_sent[V3D_CPU]++; - v3d->queue[V3D_CPU].jobs_sent++; - - file->start_ns[V3D_CPU] = 0; - v3d->queue[V3D_CPU].start_ns = 0; + v3d_job_update_stats(&job->base, V3D_CPU); return NULL; } @@ -568,24 +704,12 @@ v3d_cache_clean_job_run(struct drm_sched_job *sched_job) { struct v3d_job *job = to_v3d_job(sched_job); struct v3d_dev *v3d = job->v3d; - struct v3d_file_priv *file = job->file->driver_priv; - u64 runtime; - file->start_ns[V3D_CACHE_CLEAN] = local_clock(); - v3d->queue[V3D_CACHE_CLEAN].start_ns = file->start_ns[V3D_CACHE_CLEAN]; + v3d_job_start_stats(job, V3D_CACHE_CLEAN); v3d_clean_caches(v3d); - runtime = local_clock() - file->start_ns[V3D_CACHE_CLEAN]; - - file->enabled_ns[V3D_CACHE_CLEAN] += runtime; - v3d->queue[V3D_CACHE_CLEAN].enabled_ns += runtime; - - file->jobs_sent[V3D_CACHE_CLEAN]++; - v3d->queue[V3D_CACHE_CLEAN].jobs_sent++; - - file->start_ns[V3D_CACHE_CLEAN] = 0; - v3d->queue[V3D_CACHE_CLEAN].start_ns = 0; + v3d_job_update_stats(job, V3D_CACHE_CLEAN); return NULL; } @@ -612,7 +736,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) /* Unblock schedulers and restart their jobs. */ for (q = 0; q < V3D_MAX_QUEUES; q++) { - drm_sched_start(&v3d->queue[q].sched, true); + drm_sched_start(&v3d->queue[q].sched, 0); } mutex_unlock(&v3d->reset_lock); @@ -620,11 +744,16 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) return DRM_GPU_SCHED_STAT_NOMINAL; } -/* If the current address or return address have changed, then the GPU - * has probably made progress and we should delay the reset. This - * could fail if the GPU got in an infinite loop in the CL, but that - * is pretty unlikely outside of an i-g-t testcase. 
- */ +static void +v3d_sched_skip_reset(struct drm_sched_job *sched_job) +{ + struct drm_gpu_scheduler *sched = sched_job->sched; + + spin_lock(&sched->job_list_lock); + list_add(&sched_job->list, &sched->pending_list); + spin_unlock(&sched->job_list_lock); +} + static enum drm_gpu_sched_stat v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, u32 *timedout_ctca, u32 *timedout_ctra) @@ -634,9 +763,16 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q)); u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q)); + /* If the current address or return address have changed, then the GPU + * has probably made progress and we should delay the reset. This + * could fail if the GPU got in an infinite loop in the CL, but that + * is pretty unlikely outside of an i-g-t testcase. + */ if (*timedout_ctca != ctca || *timedout_ctra != ctra) { *timedout_ctca = ctca; *timedout_ctra = ctra; + + v3d_sched_skip_reset(sched_job); return DRM_GPU_SCHED_STAT_NOMINAL; } @@ -676,11 +812,13 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job) struct v3d_dev *v3d = job->base.v3d; u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4(v3d->ver)); - /* If we've made progress, skip reset and let the timer get - * rearmed. + /* If we've made progress, skip reset, add the job to the pending + * list, and let the timer get rearmed. */ if (job->timedout_batches != batches) { job->timedout_batches = batches; + + v3d_sched_skip_reset(sched_job); return DRM_GPU_SCHED_STAT_NOMINAL; } @@ -723,67 +861,54 @@ static const struct drm_sched_backend_ops v3d_cpu_sched_ops = { .free_job = v3d_cpu_job_free }; +static int +v3d_queue_sched_init(struct v3d_dev *v3d, const struct drm_sched_backend_ops *ops, + enum v3d_queue queue, const char *name) +{ + struct drm_sched_init_args args = { + .num_rqs = DRM_SCHED_PRIORITY_COUNT, + .credit_limit = 1, + .timeout = msecs_to_jiffies(500), + .dev = v3d->drm.dev, + }; + + args.ops = ops; + args.name = name; + + return drm_sched_init(&v3d->queue[queue].sched, &args); +} + int v3d_sched_init(struct v3d_dev *v3d) { - int hw_jobs_limit = 1; - int job_hang_limit = 0; - int hang_limit_ms = 500; int ret; - ret = drm_sched_init(&v3d->queue[V3D_BIN].sched, - &v3d_bin_sched_ops, NULL, - DRM_SCHED_PRIORITY_COUNT, - hw_jobs_limit, job_hang_limit, - msecs_to_jiffies(hang_limit_ms), NULL, - NULL, "v3d_bin", v3d->drm.dev); + ret = v3d_queue_sched_init(v3d, &v3d_bin_sched_ops, V3D_BIN, "v3d_bin"); if (ret) return ret; - ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched, - &v3d_render_sched_ops, NULL, - DRM_SCHED_PRIORITY_COUNT, - hw_jobs_limit, job_hang_limit, - msecs_to_jiffies(hang_limit_ms), NULL, - NULL, "v3d_render", v3d->drm.dev); + ret = v3d_queue_sched_init(v3d, &v3d_render_sched_ops, V3D_RENDER, + "v3d_render"); if (ret) goto fail; - ret = drm_sched_init(&v3d->queue[V3D_TFU].sched, - &v3d_tfu_sched_ops, NULL, - DRM_SCHED_PRIORITY_COUNT, - hw_jobs_limit, job_hang_limit, - msecs_to_jiffies(hang_limit_ms), NULL, - NULL, "v3d_tfu", v3d->drm.dev); + ret = v3d_queue_sched_init(v3d, &v3d_tfu_sched_ops, V3D_TFU, "v3d_tfu"); if (ret) goto fail; if (v3d_has_csd(v3d)) { - ret = drm_sched_init(&v3d->queue[V3D_CSD].sched, - &v3d_csd_sched_ops, NULL, - DRM_SCHED_PRIORITY_COUNT, - hw_jobs_limit, job_hang_limit, - msecs_to_jiffies(hang_limit_ms), NULL, - NULL, "v3d_csd", v3d->drm.dev); + ret = v3d_queue_sched_init(v3d, &v3d_csd_sched_ops, V3D_CSD, + "v3d_csd"); if (ret) goto fail; - ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched, - 
&v3d_cache_clean_sched_ops, NULL, - DRM_SCHED_PRIORITY_COUNT, - hw_jobs_limit, job_hang_limit, - msecs_to_jiffies(hang_limit_ms), NULL, - NULL, "v3d_cache_clean", v3d->drm.dev); + ret = v3d_queue_sched_init(v3d, &v3d_cache_clean_sched_ops, + V3D_CACHE_CLEAN, "v3d_cache_clean"); if (ret) goto fail; } - ret = drm_sched_init(&v3d->queue[V3D_CPU].sched, - &v3d_cpu_sched_ops, NULL, - DRM_SCHED_PRIORITY_COUNT, - 1, job_hang_limit, - msecs_to_jiffies(hang_limit_ms), NULL, - NULL, "v3d_cpu", v3d->drm.dev); + ret = v3d_queue_sched_init(v3d, &v3d_cpu_sched_ops, V3D_CPU, "v3d_cpu"); if (ret) goto fail; diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index 88f63d526b22..4ff5de46fb22 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -11,10 +11,11 @@ #include "v3d_trace.h" /* Takes the reservation lock on all the BOs being referenced, so that - * at queue submit time we can update the reservations. + * we can attach fences and update the reservations after pushing the job + * to the queue. * * We don't lock the RCL the tile alloc/state BOs, or overflow memory - * (all of which are on exec->unref_list). They're entirely private + * (all of which are on render->unref_list). They're entirely private * to v3d, so we don't attach dma-buf fences to them. */ static int @@ -55,11 +56,11 @@ fail: * @bo_count: Number of GEM handles passed in * * The command validator needs to reference BOs by their index within - * the submitted job's BO list. This does the validation of the job's + * the submitted job's BO list. This does the validation of the job's * BO list and reference counting for the lifetime of the job. * * Note that this function doesn't need to unreference the BOs on - * failure, because that will happen at v3d_exec_cleanup() time. + * failure, because that will happen at `v3d_job_free()`. 
*/ static int v3d_lookup_bos(struct drm_device *dev, @@ -452,6 +453,9 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv, { u32 __user *offsets, *syncs; struct drm_v3d_timestamp_query timestamp; + struct v3d_timestamp_query_info *query_info = &job->timestamp_query; + unsigned int i; + int err; if (!job) { DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); @@ -471,35 +475,44 @@ v3d_get_cpu_timestamp_query_params(struct drm_file *file_priv, job->job_type = V3D_CPU_JOB_TYPE_TIMESTAMP_QUERY; - job->timestamp_query.queries = kvmalloc_array(timestamp.count, - sizeof(struct v3d_timestamp_query), - GFP_KERNEL); - if (!job->timestamp_query.queries) + query_info->queries = kvmalloc_array(timestamp.count, + sizeof(struct v3d_timestamp_query), + GFP_KERNEL); + if (!query_info->queries) return -ENOMEM; offsets = u64_to_user_ptr(timestamp.offsets); syncs = u64_to_user_ptr(timestamp.syncs); - for (int i = 0; i < timestamp.count; i++) { + for (i = 0; i < timestamp.count; i++) { u32 offset, sync; - if (copy_from_user(&offset, offsets++, sizeof(offset))) { - kvfree(job->timestamp_query.queries); - return -EFAULT; + if (get_user(offset, offsets++)) { + err = -EFAULT; + goto error; } - job->timestamp_query.queries[i].offset = offset; + query_info->queries[i].offset = offset; - if (copy_from_user(&sync, syncs++, sizeof(sync))) { - kvfree(job->timestamp_query.queries); - return -EFAULT; + if (get_user(sync, syncs++)) { + err = -EFAULT; + goto error; } - job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); + query_info->queries[i].syncobj = drm_syncobj_find(file_priv, + sync); + if (!query_info->queries[i].syncobj) { + err = -ENOENT; + goto error; + } } - job->timestamp_query.count = timestamp.count; + query_info->count = timestamp.count; return 0; + +error: + v3d_timestamp_query_info_free(&job->timestamp_query, i); + return err; } static int @@ -509,6 +522,9 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv, { u32 __user *syncs; struct drm_v3d_reset_timestamp_query reset; + struct v3d_timestamp_query_info *query_info = &job->timestamp_query; + unsigned int i; + int err; if (!job) { DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); @@ -525,29 +541,38 @@ v3d_get_cpu_reset_timestamp_params(struct drm_file *file_priv, job->job_type = V3D_CPU_JOB_TYPE_RESET_TIMESTAMP_QUERY; - job->timestamp_query.queries = kvmalloc_array(reset.count, - sizeof(struct v3d_timestamp_query), - GFP_KERNEL); - if (!job->timestamp_query.queries) + query_info->queries = kvmalloc_array(reset.count, + sizeof(struct v3d_timestamp_query), + GFP_KERNEL); + if (!query_info->queries) return -ENOMEM; syncs = u64_to_user_ptr(reset.syncs); - for (int i = 0; i < reset.count; i++) { + for (i = 0; i < reset.count; i++) { u32 sync; - job->timestamp_query.queries[i].offset = reset.offset + 8 * i; + query_info->queries[i].offset = reset.offset + 8 * i; - if (copy_from_user(&sync, syncs++, sizeof(sync))) { - kvfree(job->timestamp_query.queries); - return -EFAULT; + if (get_user(sync, syncs++)) { + err = -EFAULT; + goto error; } - job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); + query_info->queries[i].syncobj = drm_syncobj_find(file_priv, + sync); + if (!query_info->queries[i].syncobj) { + err = -ENOENT; + goto error; + } } - job->timestamp_query.count = reset.count; + query_info->count = reset.count; return 0; + +error: + v3d_timestamp_query_info_free(&job->timestamp_query, i); + return err; } /* Get data for the copy timestamp query results job 
submission. */ @@ -558,7 +583,9 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv, { u32 __user *offsets, *syncs; struct drm_v3d_copy_timestamp_query copy; - int i; + struct v3d_timestamp_query_info *query_info = &job->timestamp_query; + unsigned int i; + int err; if (!job) { DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); @@ -578,10 +605,10 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv, job->job_type = V3D_CPU_JOB_TYPE_COPY_TIMESTAMP_QUERY; - job->timestamp_query.queries = kvmalloc_array(copy.count, - sizeof(struct v3d_timestamp_query), - GFP_KERNEL); - if (!job->timestamp_query.queries) + query_info->queries = kvmalloc_array(copy.count, + sizeof(struct v3d_timestamp_query), + GFP_KERNEL); + if (!query_info->queries) return -ENOMEM; offsets = u64_to_user_ptr(copy.offsets); @@ -590,21 +617,26 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv, for (i = 0; i < copy.count; i++) { u32 offset, sync; - if (copy_from_user(&offset, offsets++, sizeof(offset))) { - kvfree(job->timestamp_query.queries); - return -EFAULT; + if (get_user(offset, offsets++)) { + err = -EFAULT; + goto error; } - job->timestamp_query.queries[i].offset = offset; + query_info->queries[i].offset = offset; - if (copy_from_user(&sync, syncs++, sizeof(sync))) { - kvfree(job->timestamp_query.queries); - return -EFAULT; + if (get_user(sync, syncs++)) { + err = -EFAULT; + goto error; } - job->timestamp_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); + query_info->queries[i].syncobj = drm_syncobj_find(file_priv, + sync); + if (!query_info->queries[i].syncobj) { + err = -ENOENT; + goto error; + } } - job->timestamp_query.count = copy.count; + query_info->count = copy.count; job->copy.do_64bit = copy.do_64bit; job->copy.do_partial = copy.do_partial; @@ -613,6 +645,73 @@ v3d_get_cpu_copy_query_results_params(struct drm_file *file_priv, job->copy.stride = copy.stride; return 0; + +error: + v3d_timestamp_query_info_free(&job->timestamp_query, i); + return err; +} + +static int +v3d_copy_query_info(struct v3d_performance_query_info *query_info, + unsigned int count, + unsigned int nperfmons, + u32 __user *syncs, + u64 __user *kperfmon_ids, + struct drm_file *file_priv) +{ + unsigned int i, j; + int err; + + for (i = 0; i < count; i++) { + struct v3d_performance_query *query = &query_info->queries[i]; + u32 __user *ids_pointer; + u32 sync, id; + u64 ids; + + if (get_user(sync, syncs++)) { + err = -EFAULT; + goto error; + } + + if (get_user(ids, kperfmon_ids++)) { + err = -EFAULT; + goto error; + } + + query->kperfmon_ids = + kvmalloc_array(nperfmons, + sizeof(struct v3d_performance_query *), + GFP_KERNEL); + if (!query->kperfmon_ids) { + err = -ENOMEM; + goto error; + } + + ids_pointer = u64_to_user_ptr(ids); + + for (j = 0; j < nperfmons; j++) { + if (get_user(id, ids_pointer++)) { + kvfree(query->kperfmon_ids); + err = -EFAULT; + goto error; + } + + query->kperfmon_ids[j] = id; + } + + query->syncobj = drm_syncobj_find(file_priv, sync); + if (!query->syncobj) { + kvfree(query->kperfmon_ids); + err = -ENOENT; + goto error; + } + } + + return 0; + +error: + v3d_performance_query_info_free(query_info, i); + return err; } static int @@ -620,9 +719,9 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv, struct drm_v3d_extension __user *ext, struct v3d_cpu_job *job) { - u32 __user *syncs; - u64 __user *kperfmon_ids; + struct v3d_performance_query_info *query_info = &job->performance_query; struct drm_v3d_reset_performance_query reset; + int err; 
if (!job) { DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); @@ -639,46 +738,24 @@ v3d_get_cpu_reset_performance_params(struct drm_file *file_priv, job->job_type = V3D_CPU_JOB_TYPE_RESET_PERFORMANCE_QUERY; - job->performance_query.queries = kvmalloc_array(reset.count, - sizeof(struct v3d_performance_query), - GFP_KERNEL); - if (!job->performance_query.queries) + query_info->queries = + kvmalloc_array(reset.count, + sizeof(struct v3d_performance_query), + GFP_KERNEL); + if (!query_info->queries) return -ENOMEM; - syncs = u64_to_user_ptr(reset.syncs); - kperfmon_ids = u64_to_user_ptr(reset.kperfmon_ids); - - for (int i = 0; i < reset.count; i++) { - u32 sync; - u64 ids; - u32 __user *ids_pointer; - u32 id; - - if (copy_from_user(&sync, syncs++, sizeof(sync))) { - kvfree(job->performance_query.queries); - return -EFAULT; - } - - job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); - - if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) { - kvfree(job->performance_query.queries); - return -EFAULT; - } - - ids_pointer = u64_to_user_ptr(ids); + err = v3d_copy_query_info(query_info, + reset.count, + reset.nperfmons, + u64_to_user_ptr(reset.syncs), + u64_to_user_ptr(reset.kperfmon_ids), + file_priv); + if (err) + return err; - for (int j = 0; j < reset.nperfmons; j++) { - if (copy_from_user(&id, ids_pointer++, sizeof(id))) { - kvfree(job->performance_query.queries); - return -EFAULT; - } - - job->performance_query.queries[i].kperfmon_ids[j] = id; - } - } - job->performance_query.count = reset.count; - job->performance_query.nperfmons = reset.nperfmons; + query_info->count = reset.count; + query_info->nperfmons = reset.nperfmons; return 0; } @@ -688,9 +765,9 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv, struct drm_v3d_extension __user *ext, struct v3d_cpu_job *job) { - u32 __user *syncs; - u64 __user *kperfmon_ids; + struct v3d_performance_query_info *query_info = &job->performance_query; struct drm_v3d_copy_performance_query copy; + int err; if (!job) { DRM_DEBUG("CPU job extension was attached to a GPU job.\n"); @@ -710,47 +787,25 @@ v3d_get_cpu_copy_performance_query_params(struct drm_file *file_priv, job->job_type = V3D_CPU_JOB_TYPE_COPY_PERFORMANCE_QUERY; - job->performance_query.queries = kvmalloc_array(copy.count, - sizeof(struct v3d_performance_query), - GFP_KERNEL); - if (!job->performance_query.queries) + query_info->queries = + kvmalloc_array(copy.count, + sizeof(struct v3d_performance_query), + GFP_KERNEL); + if (!query_info->queries) return -ENOMEM; - syncs = u64_to_user_ptr(copy.syncs); - kperfmon_ids = u64_to_user_ptr(copy.kperfmon_ids); + err = v3d_copy_query_info(query_info, + copy.count, + copy.nperfmons, + u64_to_user_ptr(copy.syncs), + u64_to_user_ptr(copy.kperfmon_ids), + file_priv); + if (err) + return err; - for (int i = 0; i < copy.count; i++) { - u32 sync; - u64 ids; - u32 __user *ids_pointer; - u32 id; - - if (copy_from_user(&sync, syncs++, sizeof(sync))) { - kvfree(job->performance_query.queries); - return -EFAULT; - } - - job->performance_query.queries[i].syncobj = drm_syncobj_find(file_priv, sync); - - if (copy_from_user(&ids, kperfmon_ids++, sizeof(ids))) { - kvfree(job->performance_query.queries); - return -EFAULT; - } - - ids_pointer = u64_to_user_ptr(ids); - - for (int j = 0; j < copy.nperfmons; j++) { - if (copy_from_user(&id, ids_pointer++, sizeof(id))) { - kvfree(job->performance_query.queries); - return -EFAULT; - } - - job->performance_query.queries[i].kperfmon_ids[j] = id; - } - } - 
job->performance_query.count = copy.count; - job->performance_query.nperfmons = copy.nperfmons; - job->performance_query.ncounters = copy.ncounters; + query_info->count = copy.count; + query_info->nperfmons = copy.nperfmons; + query_info->ncounters = copy.ncounters; job->copy.do_64bit = copy.do_64bit; job->copy.do_partial = copy.do_partial; @@ -927,6 +982,11 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, goto fail; if (args->perfmon_id) { + if (v3d->global_perfmon) { + ret = -EAGAIN; + goto fail_perfmon; + } + render->base.perfmon = v3d_perfmon_find(v3d_priv, args->perfmon_id); @@ -1142,6 +1202,11 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, goto fail; if (args->perfmon_id) { + if (v3d->global_perfmon) { + ret = -EAGAIN; + goto fail_perfmon; + } + job->base.perfmon = v3d_perfmon_find(v3d_priv, args->perfmon_id); if (!job->base.perfmon) { diff --git a/drivers/gpu/drm/v3d/v3d_sysfs.c b/drivers/gpu/drm/v3d/v3d_sysfs.c index d106845ba890..d610e355964f 100644 --- a/drivers/gpu/drm/v3d/v3d_sysfs.c +++ b/drivers/gpu/drm/v3d/v3d_sysfs.c @@ -15,16 +15,15 @@ gpu_stats_show(struct device *dev, struct device_attribute *attr, char *buf) struct v3d_dev *v3d = to_v3d_dev(drm); enum v3d_queue queue; u64 timestamp = local_clock(); - u64 active_runtime; ssize_t len = 0; len += sysfs_emit(buf, "queue\ttimestamp\tjobs\truntime\n"); for (queue = 0; queue < V3D_MAX_QUEUES; queue++) { - if (v3d->queue[queue].start_ns) - active_runtime = timestamp - v3d->queue[queue].start_ns; - else - active_runtime = 0; + struct v3d_stats *stats = &v3d->queue[queue].stats; + u64 active_runtime, jobs_completed; + + v3d_get_stats(stats, timestamp, &active_runtime, &jobs_completed); /* Each line will display the queue name, timestamp, the number * of jobs sent to that queue and the runtime, as can be seem here: @@ -38,9 +37,7 @@ gpu_stats_show(struct device *dev, struct device_attribute *attr, char *buf) */ len += sysfs_emit_at(buf, len, "%s\t%llu\t%llu\t%llu\n", v3d_queue_to_string(queue), - timestamp, - v3d->queue[queue].jobs_sent, - v3d->queue[queue].enabled_ns + active_runtime); + timestamp, jobs_completed, active_runtime); } return len; |
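
Taken together with the per-generation counter tables added earlier, the new get_counter ioctl lets userspace enumerate the available performance counters at runtime instead of hardcoding V3D_PERFCNT_NUM. The sketch below shows one way a tool could walk that table; it assumes the DRM_IOCTL_V3D_PERFMON_GET_COUNTER wrapper and the struct drm_v3d_perfmon_get_counter field names from the matching uapi header update, which is not part of this excerpt.

/* Sketch: enumerate V3D performance counters via the new ioctl.
 * Macro and field names are assumed from the matching uapi change,
 * which is outside this diff excerpt.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm/v3d_drm.h"

static void list_v3d_counters(int fd)
{
	struct drm_v3d_perfmon_get_counter req;
	unsigned int i;

	for (i = 0; ; i++) {
		memset(&req, 0, sizeof(req));
		req.counter = i;

		/* -EINVAL marks the end of the table; -EOPNOTSUPP means
		 * this V3D generation exposes no counters at all.
		 */
		if (ioctl(fd, DRM_IOCTL_V3D_PERFMON_GET_COUNTER, &req))
			break;

		printf("%u: [%s] %s - %s\n", i,
		       (const char *)req.category,
		       (const char *)req.name,
		       (const char *)req.description);
	}
}
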