Diffstat (limited to 'drivers/gpu/drm/msm/msm_gpu.h')
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h  337
1 file changed, 261 insertions(+), 76 deletions(-)
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 445c6bfd4b6b..2894fc118485 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -9,18 +9,22 @@
#include <linux/adreno-smmu-priv.h>
#include <linux/clk.h>
+#include <linux/devfreq.h>
#include <linux/interconnect.h>
#include <linux/pm_opp.h>
#include <linux/regulator/consumer.h>
#include "msm_drv.h"
#include "msm_fence.h"
+#include "msm_gpu_trace.h"
#include "msm_ringbuffer.h"
#include "msm_gem.h"
struct msm_gem_submit;
+struct msm_gem_vm_log_entry;
struct msm_gpu_perfcntr;
struct msm_gpu_state;
+struct msm_context;
struct msm_gpu_config {
const char *ioname;
@@ -42,8 +46,17 @@ struct msm_gpu_config {
* + z180_gpu
*/
struct msm_gpu_funcs {
- int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
+ int (*get_param)(struct msm_gpu *gpu, struct msm_context *ctx,
+ uint32_t param, uint64_t *value, uint32_t *len);
+ int (*set_param)(struct msm_gpu *gpu, struct msm_context *ctx,
+ uint32_t param, uint64_t value, uint32_t len);
int (*hw_init)(struct msm_gpu *gpu);
+
+ /**
+ * @ucode_load: Optional hook to upload fw to GEM objs
+ */
+ int (*ucode_load)(struct msm_gpu *gpu);
+
int (*pm_suspend)(struct msm_gpu *gpu);
int (*pm_resume)(struct msm_gpu *gpu);
void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
@@ -59,16 +72,27 @@ struct msm_gpu_funcs {
/* for generation specific debugfs: */
void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
- unsigned long (*gpu_busy)(struct msm_gpu *gpu);
+ /* note: gpu_busy() can assume that we have been pm_resumed */
+ u64 (*gpu_busy)(struct msm_gpu *gpu, unsigned long *out_sample_rate);
struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
int (*gpu_state_put)(struct msm_gpu_state *state);
unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
- void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
- struct msm_gem_address_space *(*create_address_space)
- (struct msm_gpu *gpu, struct platform_device *pdev);
- struct msm_gem_address_space *(*create_private_address_space)
- (struct msm_gpu *gpu);
+ /* note: gpu_set_freq() can assume that we have been pm_resumed */
+ void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
+ bool suspended);
+ struct drm_gpuvm *(*create_vm)(struct msm_gpu *gpu, struct platform_device *pdev);
+ struct drm_gpuvm *(*create_private_vm)(struct msm_gpu *gpu, bool kernel_managed);
uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+
+ /**
+ * progress: Has the GPU made progress?
+ *
+ * Return true if GPU position in cmdstream has advanced (or changed)
+ * since the last call. To avoid false negatives, this should account
+ * for cmdstream that is buffered in this FIFO upstream of the CP fw.
+ */
+ bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+ void (*sysprof_setup)(struct msm_gpu *gpu);
};
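The new progress() hook gives the hangcheck path a way to tell a slow-but-advancing GPU apart from a genuinely wedged one. Below is a minimal sketch of how a hangcheck tick could consult it before escalating to recovery; the helper name, the caller-owned retry counter, and the use of a kthread worker on msm_gpu (not visible in this hunk) are assumptions for illustration, not the driver's actual handler.

static void example_hangcheck_tick(struct msm_gpu *gpu,
				   struct msm_ringbuffer *ring,
				   int *retries)
{
	if (gpu->funcs->progress && gpu->funcs->progress(gpu, ring)) {
		/* Still chewing through the cmdstream: reset the retry budget. */
		*retries = 0;
		return;
	}

	if (++(*retries) < DRM_MSM_HANGCHECK_PROGRESS_RETRIES)
		return;		/* no progress yet, but allow a few more ticks */

	/* Out of retries: queue recovery (assumes msm_gpu carries a kthread_worker). */
	kthread_queue_work(gpu->worker, &gpu->recover_work);
}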
/* Additional state for iommu faults: */
@@ -78,6 +102,14 @@ struct msm_gpu_fault_info {
int flags;
const char *type;
const char *block;
+
+ /* Information about what we think/expect is the current SMMU state,
+ * for example expected_ttbr0 should match smmu_info.ttbr0 which
+ * was read back from SMMU registers.
+ */
+ phys_addr_t pgtbl_ttbr0;
+ u64 ptes[4];
+ int asid;
};
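A hedged sketch of how a page-fault handler might populate the new SMMU-state fields before the crash state is captured; the helper and where the expected TTBR0/PTE values come from (a software walk of the faulting iova's page tables) are assumptions based on the comment above, not the driver's actual code.

static void example_fill_smmu_state(struct msm_gpu_fault_info *info,
				    phys_addr_t expected_ttbr0, int asid,
				    const u64 walk[4])
{
	int i;

	info->pgtbl_ttbr0 = expected_ttbr0;	/* what we think TTBR0 should contain */
	info->asid = asid;			/* ASID/context we expected to be installed */
	for (i = 0; i < 4; i++)
		info->ptes[i] = walk[i];	/* per-level PTEs for the faulting iova */
}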
/**
@@ -87,12 +119,19 @@ struct msm_gpu_devfreq {
/** devfreq: devfreq instance */
struct devfreq *devfreq;
+ /** lock: lock for "suspended", "busy_cycles", and "time" */
+ struct mutex lock;
+
/**
- * idle_constraint:
+ * idle_freq:
*
- * A PM QoS constraint to limit max freq while the GPU is idle.
+ * Shadow frequency used while the GPU is idle. From the PoV of
+ * the devfreq governor, we are continuing to sample busyness and
+ * adjust frequency while the GPU is idle, but we use this shadow
+ * value as the GPU is actually clamped to minimum frequency while
+ * it is inactive.
*/
- struct dev_pm_qos_request idle_freq;
+ unsigned long idle_freq;
/**
* boost_constraint:
@@ -103,11 +142,8 @@ struct msm_gpu_devfreq {
struct dev_pm_qos_request boost_freq;
/**
- * busy_cycles:
- *
- * Used by implementation of gpu->gpu_busy() to track the last
- * busy counter value, for calculating elapsed busy cycles since
- * last sampling period.
+ * busy_cycles: Last busy counter value, for calculating elapsed busy
+ * cycles since last sampling period.
*/
u64 busy_cycles;
@@ -131,6 +167,9 @@ struct msm_gpu_devfreq {
* elapsed
*/
struct msm_hrtimer_work boost_work;
+
+ /** suspended: tracks if we're suspended */
+ bool suspended;
};
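The shadow idle_freq lets the clamp stay invisible to the governor: frequency reads report the remembered value while the hardware sits at its minimum OPP. A minimal sketch of that read-back, assuming msm_gpu embeds its msm_gpu_devfreq as gpu->devfreq (not visible in this excerpt) and a hypothetical helper name; the real logic lives in msm_gpu_devfreq.c.

static unsigned long example_get_cur_freq(struct msm_gpu *gpu)
{
	struct msm_gpu_devfreq *df = &gpu->devfreq;

	/*
	 * While idle, report the shadow frequency so the governor keeps
	 * scaling from where it left off, even though the hardware is
	 * actually clamped to its minimum OPP.
	 */
	if (df->idle_freq)
		return df->idle_freq;

	return gpu->funcs->gpu_get_freq(gpu);
}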
struct msm_gpu {
@@ -157,21 +196,11 @@ struct msm_gpu {
int nr_rings;
/**
- * cur_ctx_seqno:
+ * sysprof_active:
*
- * The ctx->seqno value of the last context to submit rendering,
- * and the one with current pgtables installed (for generations
- * that support per-context pgtables). Tracked by seqno rather
- * than pointer value to avoid dangling pointers, and cases where
- * a ctx can be freed and a new one created with the same address.
- */
- int cur_ctx_seqno;
-
- /*
- * List of GEM active objects on this gpu. Protected by
- * msm_drm_private::mm_lock
+ * The count of contexts that have enabled system profiling.
*/
- struct list_head active_list;
+ refcount_t sysprof_active;
/**
* lock:
@@ -199,13 +228,16 @@ struct msm_gpu {
/* does gpu need hw_init? */
bool needs_hw_init;
- /* number of GPU hangs (for all contexts) */
+ /**
+ * global_faults: number of GPU hangs not attributed to a particular
+ * address space
+ */
int global_faults;
void __iomem *mmio;
int irq;
- struct msm_gem_address_space *aspace;
+ struct drm_gpuvm *vm;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
@@ -219,17 +251,15 @@ struct msm_gpu {
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
#define DRM_MSM_HANGCHECK_DEFAULT_PERIOD 500 /* in ms */
+#define DRM_MSM_HANGCHECK_PROGRESS_RETRIES 3
struct timer_list hangcheck_timer;
- /* Fault info for most recent iova fault: */
- struct msm_gpu_fault_info fault_info;
-
- /* work for handling GPU iova faults: */
- struct kthread_work fault_work;
-
/* work for handling GPU recovery: */
struct kthread_work recover_work;
+ /** retire_event: notified when submits are retired: */
+ wait_queue_head_t retire_event;
+
/* work for handling active-list retiring: */
struct kthread_work retire_work;
@@ -244,21 +274,42 @@ struct msm_gpu {
struct msm_gpu_state *crashstate;
- /* Enable clamping to idle freq when inactive: */
- bool clamp_to_idle;
-
/* True if the hardware supports expanded apriv (a650 and newer) */
bool hw_apriv;
+ /**
+ * @allow_relocs: allow relocs in SUBMIT ioctl
+ *
+ * Mesa won't use relocs for driver version 1.4.0 and later. This
+ * switch-over happened early enough in mesa a6xx bringup that we
+ * can disallow relocs for a6xx and newer.
+ */
+ bool allow_relocs;
+
struct thermal_cooling_device *cooling;
};
static inline struct msm_gpu *dev_to_gpu(struct device *dev)
{
struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
+
+ if (!adreno_smmu)
+ return NULL;
+
return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
}
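Since dev_to_gpu() can now return NULL before the adreno-smmu drvdata has been set, any caller that can run early in probe (for example a runtime-PM callback) has to tolerate that. A hypothetical caller sketch, using msm_gpu_pm_suspend() declared later in this header:

static int example_runtime_suspend(struct device *dev)
{
	struct msm_gpu *gpu = dev_to_gpu(dev);

	/* Not fully probed yet: nothing to power down. */
	if (!gpu)
		return 0;

	return msm_gpu_pm_suspend(gpu);
}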
+static inline bool
+adreno_smmu_has_prr(struct msm_gpu *gpu)
+{
+ struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+
+ if (!adreno_smmu)
+ return false;
+
+ return adreno_smmu->set_prr_addr;
+}
+
/* It turns out that all targets use the same ringbuffer size */
#define MSM_GPU_RINGBUFFER_SZ SZ_32K
#define MSM_GPU_RINGBUFFER_BLKSIZE 32
@@ -274,7 +325,7 @@ static inline bool msm_gpu_active(struct msm_gpu *gpu)
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
- if (fence_after(ring->seqno, ring->memptrs->fence))
+ if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
return true;
}
@@ -299,29 +350,112 @@ struct msm_gpu_perfcntr {
* DRM_SCHED_PRIORITY_KERNEL priority level is treated specially in some
* cases, so we don't use it (no need for kernel generated jobs).
*/
-#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_HIGH - DRM_SCHED_PRIORITY_MIN)
+#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH)
/**
- * struct msm_file_private - per-drm_file context
- *
- * @queuelock: synchronizes access to submitqueues list
- * @submitqueues: list of &msm_gpu_submitqueue created by userspace
- * @queueid: counter incremented each time a submitqueue is created,
- * used to assign &msm_gpu_submitqueue.id
- * @aspace: the per-process GPU address-space
- * @ref: reference count
- * @seqno: unique per process seqno
+ * struct msm_context - per-drm_file context
*/
-struct msm_file_private {
+struct msm_context {
+ /** @queuelock: synchronizes access to submitqueues list */
rwlock_t queuelock;
+
+ /** @submitqueues: list of &msm_gpu_submitqueue created by userspace */
struct list_head submitqueues;
+
+ /**
+ * @queueid:
+ *
+ * Counter incremented each time a submitqueue is created, used to
+ * assign &msm_gpu_submitqueue.id
+ */
int queueid;
- struct msm_gem_address_space *aspace;
+
+ /**
+ * @closed: The device file associated with this context has been closed.
+ *
+ * Once the device is closed, any submits that have not been written
+ * to the ring buffer are no-op'd.
+ */
+ bool closed;
+
+ /**
+ * @userspace_managed_vm:
+ *
+ * Has userspace opted-in to userspace managed VM (ie. VM_BIND) via
+ * MSM_PARAM_EN_VM_BIND?
+ */
+ bool userspace_managed_vm;
+
+ /**
+ * @vm:
+ *
+ * The per-process GPU address-space. Do not access directly, use
+ * msm_context_vm().
+ */
+ struct drm_gpuvm *vm;
+
+ /** @kref: the reference count */
struct kref ref;
+
+ /**
+ * @seqno:
+ *
+ * A unique per-process sequence number. Used to detect context
+ * switches, without relying on keeping a potentially dangling
+ * pointer to the previous context.
+ */
int seqno;
/**
- * entities:
+ * @sysprof:
+ *
+ * The value of MSM_PARAM_SYSPROF set by userspace. This is
+ * intended to be used by system profiling tools like Mesa's
+ * pps-producer (perfetto), and restricted to CAP_SYS_ADMIN.
+ *
+ * Setting a value of 1 will preserve performance counters across
+ * context switches. Setting a value of 2 will in addition
+ * suppress suspend. (Performance counters lose state across
+ * power collapse, which is undesirable for profiling in some
+ * cases.)
+ *
+ * The value automatically reverts to zero when the drm device
+ * file is closed.
+ */
+ int sysprof;
+
+ /**
+ * @comm: Overridden task comm, see MSM_PARAM_COMM
+ *
+ * Accessed under msm_gpu::lock
+ */
+ char *comm;
+
+ /**
+ * @cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
+ *
+ * Accessed under msm_gpu::lock
+ */
+ char *cmdline;
+
+ /**
+ * @elapsed_ns:
+ *
+ * The total (cumulative) elapsed time GPU was busy with rendering
+ * from this context in ns.
+ */
+ uint64_t elapsed_ns;
+
+ /**
+ * @cycles:
+ *
+ * The total (cumulative) GPU cycles elapsed attributed to this
+ * context.
+ */
+ uint64_t cycles;
+
+ /**
+ * @entities:
*
* Table of per-priority-level sched entities used by submitqueues
* associated with this &drm_file. Because some userspace apps
@@ -332,8 +466,34 @@ struct msm_file_private {
* level.
*/
struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS];
+
+ /**
+ * @ctx_mem:
+ *
+ * Total amount of memory of GEM buffers with handles attached for
+ * this context.
+ */
+ atomic64_t ctx_mem;
};
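The cumulative @elapsed_ns and @cycles fields feed per-context busy reporting (see msm_gpu_show_fdinfo() below). A hedged sketch of how the retire path could accumulate them from per-submit start/end samples; the sample sources and helper name are illustrative only.

static void example_account_retired_submit(struct msm_context *ctx,
					   u64 start_ns, u64 end_ns,
					   u64 start_cycles, u64 end_cycles)
{
	/* Accumulate the time and GPU cycles spent on this context's submit. */
	ctx->elapsed_ns += end_ns - start_ns;
	ctx->cycles += end_cycles - start_cycles;
}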
+struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx);
+
+/**
+ * msm_context_is_vmbind() - has userspace opted in to VM_BIND?
+ *
+ * @ctx: the drm_file context
+ *
+ * See MSM_PARAM_EN_VM_BIND. If userspace is managing the VM, it can
+ * do sparse binding including having multiple, potentially partial,
+ * mappings in the VM. Therefore certain legacy uabi (ie. GET_IOVA,
+ * SET_IOVA) are rejected because they don't have a sensible meaning.
+ */
+static inline bool
+msm_context_is_vmbind(struct msm_context *ctx)
+{
+ return ctx->userspace_managed_vm;
+}
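As the kerneldoc above notes, the legacy single-iova uabi stops making sense once userspace manages the VM. A hypothetical ioctl-side guard illustrating the check; the helper name is made up for this sketch.

static int example_legacy_iova_allowed(struct msm_context *ctx)
{
	/*
	 * With VM_BIND, a BO may have several partial mappings (or none),
	 * so "the" iova of a BO is no longer well defined.
	 */
	if (msm_context_is_vmbind(ctx))
		return -EINVAL;

	return 0;
}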
+
/**
* msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
*
@@ -400,7 +560,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
* @node: node in the context's list of submitqueues
* @fence_idr: maps fence-id to dma_fence for userspace visible fence
* seqno, protected by @idr_lock
- * @lock: submitqueue lock
+ * @idr_lock: for serializing access to fence_idr
+ * @lock: submitqueue lock for serializing submits on a queue
* @ref: reference count
* @entity: the submit job-queue
*/
@@ -410,19 +571,25 @@ struct msm_gpu_submitqueue {
u32 ring_nr;
int faults;
uint32_t last_fence;
- struct msm_file_private *ctx;
+ struct msm_context *ctx;
struct list_head node;
struct idr fence_idr;
+ struct spinlock idr_lock;
struct mutex lock;
struct kref ref;
struct drm_sched_entity *entity;
+
+ /** @_vm_bind_entity: used for @entity pointer for VM_BIND queues */
+ struct drm_sched_entity _vm_bind_entity[0];
};
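A sketch of the intended split between the two locks: @idr_lock only serializes fence-id lookup/allocation in @fence_idr, while @lock serializes whole submits on the queue. The lookup helper below is illustrative, not the driver's actual code.

static struct dma_fence *example_fence_lookup(struct msm_gpu_submitqueue *queue,
					      u32 fence_id)
{
	struct dma_fence *fence;

	spin_lock(&queue->idr_lock);
	fence = idr_find(&queue->fence_idr, fence_id);
	if (fence)
		fence = dma_fence_get_rcu(fence);	/* may race with final put */
	spin_unlock(&queue->idr_lock);

	return fence;
}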
struct msm_gpu_state_bo {
u64 iova;
size_t size;
+ u32 flags;
void *data;
bool encoded;
+ char name[32];
};
struct msm_gpu_state {
@@ -450,26 +617,32 @@ struct msm_gpu_state {
struct msm_gpu_fault_info fault_info;
+ int nr_vm_logs;
+ struct msm_gem_vm_log_entry *vm_logs;
+
int nr_bos;
struct msm_gpu_state_bo *bos;
};
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
- msm_writel(data, gpu->mmio + (reg << 2));
+ trace_msm_gpu_regaccess(reg);
+ writel(data, gpu->mmio + (reg << 2));
}
static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
- return msm_readl(gpu->mmio + (reg << 2));
+ trace_msm_gpu_regaccess(reg);
+ return readl(gpu->mmio + (reg << 2));
}
static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
+ trace_msm_gpu_regaccess(reg);
msm_rmw(gpu->mmio + (reg << 2), mask, or);
}
-static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
+static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
{
u64 val;
@@ -487,44 +660,52 @@ static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
* when the lo is read, so make sure to read the lo first to trigger
* that
*/
- val = (u64) msm_readl(gpu->mmio + (lo << 2));
- val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
+ trace_msm_gpu_regaccess(reg);
+ val = (u64) readl(gpu->mmio + (reg << 2));
+ trace_msm_gpu_regaccess(reg+1);
+ val |= ((u64) readl(gpu->mmio + ((reg + 1) << 2)) << 32);
return val;
}
-static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
+static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
{
+ trace_msm_gpu_regaccess(reg);
/* Why not a writeq here? Read the screed above */
- msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
- msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
+ writel(lower_32_bits(val), gpu->mmio + (reg << 2));
+ trace_msm_gpu_regaccess(reg+1);
+ writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
}
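The 64-bit accessors now take a single register offset and touch reg and reg+1 as the lo/hi pair, so callers no longer pass both halves. Usage sketch with a made-up register index:

#define REG_EXAMPLE_TIMESTAMP_LO	0x0400	/* made-up index; hi half lives at +1 */

static u64 example_read_timestamp(struct msm_gpu *gpu)
{
	/* Reads the lo register first, then the adjacent hi register. */
	return gpu_read64(gpu, REG_EXAMPLE_TIMESTAMP_LO);
}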
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
-int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
-struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_context *ctx,
+ struct drm_printer *p);
+
+int msm_submitqueue_init(struct drm_device *drm, struct msm_context *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_context *ctx,
u32 id);
int msm_submitqueue_create(struct drm_device *drm,
- struct msm_file_private *ctx,
+ struct msm_context *ctx,
u32 prio, u32 flags, u32 *id);
-int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+int msm_submitqueue_query(struct drm_device *drm, struct msm_context *ctx,
struct drm_msm_submitqueue_query *args);
-int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
-void msm_submitqueue_close(struct msm_file_private *ctx);
+int msm_submitqueue_remove(struct msm_context *ctx, u32 id);
+void msm_submitqueue_close(struct msm_context *ctx);
void msm_submitqueue_destroy(struct kref *kref);
-void __msm_file_private_destroy(struct kref *kref);
+int msm_context_set_sysprof(struct msm_context *ctx, struct msm_gpu *gpu, int sysprof);
+void __msm_context_destroy(struct kref *kref);
-static inline void msm_file_private_put(struct msm_file_private *ctx)
+static inline void msm_context_put(struct msm_context *ctx)
{
- kref_put(&ctx->ref, __msm_file_private_destroy);
+ kref_put(&ctx->ref, __msm_context_destroy);
}
-static inline struct msm_file_private *msm_file_private_get(
- struct msm_file_private *ctx)
+static inline struct msm_context *msm_context_get(
+ struct msm_context *ctx)
{
kref_get(&ctx->ref);
return ctx;
@@ -552,12 +733,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config);
-struct msm_gem_address_space *
-msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);
+struct drm_gpuvm *
+msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task,
+ bool kernel_managed);
void msm_gpu_cleanup(struct msm_gpu *gpu);
struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
+bool adreno_has_gpu(struct device_node *node);
void __init adreno_register(void);
void __exit adreno_unregister(void);
@@ -595,6 +778,8 @@ static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
mutex_unlock(&gpu->lock);
}
+void msm_gpu_fault_crashstate_capture(struct msm_gpu *gpu, struct msm_gpu_fault_info *fault_info);
+
/*
* Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can
* support expanded privileges