Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_job.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 491
1 file changed, 393 insertions(+), 98 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 3d641e10e6b6..0a0dcbf0798d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -24,189 +24,484 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
-#include <drm/drmP.h>
+
+#include <drm/drm_drv.h>
+
#include "amdgpu.h"
#include "amdgpu_trace.h"
+#include "amdgpu_reset.h"
+#include "amdgpu_dev_coredump.h"
+#include "amdgpu_xgmi.h"
-static void amdgpu_job_timedout(struct amd_sched_job *s_job)
+static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
{
- struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+ int i;
- DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
- job->base.sched->name,
- atomic_read(&job->ring->fence_drv.last_seq),
- job->ring->fence_drv.sync_seq);
+ dev_info(adev->dev, "Dumping IP State\n");
+ for (i = 0; i < adev->num_ip_blocks; i++)
+ if (adev->ip_blocks[i].version->funcs->dump_ip_state)
+ adev->ip_blocks[i].version->funcs
+ ->dump_ip_state((void *)&adev->ip_blocks[i]);
+ dev_info(adev->dev, "Dumping IP State Completed\n");
- if (amdgpu_sriov_vf(job->adev))
- amdgpu_sriov_gpu_reset(job->adev, job);
- else
- amdgpu_gpu_reset(job->adev);
+ amdgpu_coredump(adev, true, false, job);
+}
+
+static void amdgpu_job_core_dump(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ struct list_head device_list, *device_list_handle = NULL;
+ struct amdgpu_device *tmp_adev = NULL;
+ struct amdgpu_hive_info *hive = NULL;
+
+ if (!amdgpu_sriov_vf(adev))
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+ /*
+ * Reuse the logic in amdgpu_device_gpu_recover() to build list of
+ * devices for core dump
+ */
+ INIT_LIST_HEAD(&device_list);
+ if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ if (!list_is_first(&adev->reset_list, &device_list))
+ list_rotate_to_front(&adev->reset_list, &device_list);
+ device_list_handle = &device_list;
+ } else {
+ list_add_tail(&adev->reset_list, &device_list);
+ device_list_handle = &device_list;
+ }
+
+ /* Do the coredump for each device */
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list)
+ amdgpu_job_do_core_dump(tmp_adev, job);
+
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+ }
+}
+
+static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
+{
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
+ struct amdgpu_job *job = to_amdgpu_job(s_job);
+ struct drm_wedge_task_info *info = NULL;
+ struct amdgpu_task_info *ti = NULL;
+ struct amdgpu_device *adev = ring->adev;
+ int idx, r;
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
+ dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s",
+ __func__, s_job->sched->name);
+
+ /* Effectively the job is aborted as the device is gone */
+ return DRM_GPU_SCHED_STAT_ENODEV;
+ }
+
+ /*
+ * Do the coredump immediately after a job timeout to get a very
+ * close dump/snapshot/representation of GPU's current error status.
+ * Skip it for SRIOV, since VF FLR will be triggered by host driver
+ * before job timeout
+ */
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_job_core_dump(adev, job);
+
+ if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_SOFT_RESET) &&
+ amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
+ dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
+ s_job->sched->name);
+ goto exit;
+ }
+
+ dev_err(adev->dev, "ring %s timeout, signaled seq=%u, emitted seq=%u\n",
+ job->base.sched->name, atomic_read(&ring->fence_drv.last_seq),
+ ring->fence_drv.sync_seq);
+
+ ti = amdgpu_vm_get_task_info_pasid(ring->adev, job->pasid);
+ if (ti) {
+ amdgpu_vm_print_task_info(adev, ti);
+ info = &ti->task;
+ }
+
+ /* attempt a per ring reset */
+ if (amdgpu_gpu_recovery &&
+ amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) &&
+ ring->funcs->reset) {
+ dev_err(adev->dev, "Starting %s ring reset\n",
+ s_job->sched->name);
+ r = amdgpu_ring_reset(ring, job->vmid, job->hw_fence);
+ if (!r) {
+ atomic_inc(&ring->adev->gpu_reset_counter);
+ dev_err(adev->dev, "Ring %s reset succeeded\n",
+ ring->sched.name);
+ drm_dev_wedged_event(adev_to_drm(adev),
+ DRM_WEDGE_RECOVERY_NONE, info);
+ goto exit;
+ }
+ dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name);
+ }
+
+ dma_fence_set_error(&s_job->s_fence->finished, -ETIME);
+
+ if (amdgpu_device_should_recover_gpu(ring->adev)) {
+ struct amdgpu_reset_context reset_context;
+ memset(&reset_context, 0, sizeof(reset_context));
+
+ reset_context.method = AMD_RESET_METHOD_NONE;
+ reset_context.reset_req_dev = adev;
+ reset_context.src = AMDGPU_RESET_SRC_JOB;
+ clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+
+ /*
+ * Avoid an unnecessary extra coredump, as we already have a very
+ * close representation of the GPU's error status
+ */
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+
+ r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
+ if (r)
+ dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
+ } else {
+ drm_sched_suspend_timeout(&ring->sched);
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.tdr_debug = true;
+ }
+
+exit:
+ amdgpu_vm_put_task_info(ti);
+ drm_dev_exit(idx);
+ return DRM_GPU_SCHED_STAT_RESET;
}
-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
- struct amdgpu_job **job, struct amdgpu_vm *vm)
+int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct drm_sched_entity *entity, void *owner,
+ unsigned int num_ibs, struct amdgpu_job **job,
+ u64 drm_client_id)
{
- size_t size = sizeof(struct amdgpu_job);
+ struct amdgpu_fence *af;
+ int r;
if (num_ibs == 0)
return -EINVAL;
- size += sizeof(struct amdgpu_ib) * num_ibs;
-
- *job = kzalloc(size, GFP_KERNEL);
+ *job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL);
if (!*job)
return -ENOMEM;
- (*job)->adev = adev;
+ af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+ if (!af) {
+ r = -ENOMEM;
+ goto err_job;
+ }
+ (*job)->hw_fence = af;
+
+ af = kzalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+ if (!af) {
+ r = -ENOMEM;
+ goto err_fence;
+ }
+ (*job)->hw_vm_fence = af;
+
(*job)->vm = vm;
- (*job)->ibs = (void *)&(*job)[1];
- (*job)->num_ibs = num_ibs;
- amdgpu_sync_create(&(*job)->sync);
- amdgpu_sync_create(&(*job)->dep_sync);
- amdgpu_sync_create(&(*job)->sched_sync);
+ amdgpu_sync_create(&(*job)->explicit_sync);
+ (*job)->generation = amdgpu_vm_generation(adev, vm);
+ (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;
- return 0;
+ if (!entity)
+ return 0;
+
+ return drm_sched_job_init(&(*job)->base, entity, 1, owner,
+ drm_client_id);
+
+err_fence:
+ kfree((*job)->hw_fence);
+err_job:
+ kfree(*job);
+ *job = NULL;
+
+ return r;
}
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- struct amdgpu_job **job)
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
+ struct drm_sched_entity *entity, void *owner,
+ size_t size, enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_job **job, u64 k_job_id)
{
int r;
- r = amdgpu_job_alloc(adev, 1, job, NULL);
+ r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job,
+ k_job_id);
if (r)
return r;
- r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
- if (r)
+ (*job)->num_ibs = 1;
+ r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
+ if (r) {
+ if (entity)
+ drm_sched_job_cleanup(&(*job)->base);
+ kfree((*job)->hw_vm_fence);
+ kfree((*job)->hw_fence);
kfree(*job);
+ *job = NULL;
+ }
return r;
}
+void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
+ struct amdgpu_bo *gws, struct amdgpu_bo *oa)
+{
+ if (gds) {
+ job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
+ job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
+ }
+ if (gws) {
+ job->gws_base = amdgpu_bo_gpu_offset(gws) >> PAGE_SHIFT;
+ job->gws_size = amdgpu_bo_size(gws) >> PAGE_SHIFT;
+ }
+ if (oa) {
+ job->oa_base = amdgpu_bo_gpu_offset(oa) >> PAGE_SHIFT;
+ job->oa_size = amdgpu_bo_size(oa) >> PAGE_SHIFT;
+ }
+}
+
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
struct dma_fence *f;
unsigned i;
- /* use sched fence if available */
- f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
+ /* Check if any fences were initialized */
+ if (job->base.s_fence && job->base.s_fence->finished.ops)
+ f = &job->base.s_fence->finished;
+ else if (job->hw_fence && job->hw_fence->base.ops)
+ f = &job->hw_fence->base;
+ else
+ f = NULL;
for (i = 0; i < job->num_ibs; ++i)
- amdgpu_ib_free(job->adev, &job->ibs[i], f);
+ amdgpu_ib_free(&job->ibs[i], f);
}
-static void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
- struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
+ struct amdgpu_job *job = to_amdgpu_job(s_job);
+
+ drm_sched_job_cleanup(s_job);
+
+ amdgpu_sync_free(&job->explicit_sync);
+
+ if (job->hw_fence->base.ops)
+ dma_fence_put(&job->hw_fence->base);
+ else
+ kfree(job->hw_fence);
+ if (job->hw_vm_fence->base.ops)
+ dma_fence_put(&job->hw_vm_fence->base);
+ else
+ kfree(job->hw_vm_fence);
- dma_fence_put(job->fence);
- amdgpu_sync_free(&job->sync);
- amdgpu_sync_free(&job->dep_sync);
- amdgpu_sync_free(&job->sched_sync);
kfree(job);
}
+void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
+ struct amdgpu_job *leader)
+{
+ struct dma_fence *fence = &leader->base.s_fence->scheduled;
+
+ WARN_ON(job->gang_submit);
+
+ /*
+ * Don't add a reference when we are the gang leader to avoid a circular
+ * dependency.
+ */
+ if (job != leader)
+ dma_fence_get(fence);
+ job->gang_submit = fence;
+}
+
void amdgpu_job_free(struct amdgpu_job *job)
{
+ if (job->base.entity)
+ drm_sched_job_cleanup(&job->base);
+
amdgpu_job_free_resources(job);
+ amdgpu_sync_free(&job->explicit_sync);
+ if (job->gang_submit != &job->base.s_fence->scheduled)
+ dma_fence_put(job->gang_submit);
+
+ if (job->hw_fence->base.ops)
+ dma_fence_put(&job->hw_fence->base);
+ else
+ kfree(job->hw_fence);
+ if (job->hw_vm_fence->base.ops)
+ dma_fence_put(&job->hw_vm_fence->base);
+ else
+ kfree(job->hw_vm_fence);
- dma_fence_put(job->fence);
- amdgpu_sync_free(&job->sync);
- amdgpu_sync_free(&job->dep_sync);
- amdgpu_sync_free(&job->sched_sync);
kfree(job);
}
-int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
- struct amd_sched_entity *entity, void *owner,
- struct dma_fence **f)
+struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
+{
+ struct dma_fence *f;
+
+ drm_sched_job_arm(&job->base);
+ f = dma_fence_get(&job->base.s_fence->finished);
+ amdgpu_job_free_resources(job);
+ drm_sched_entity_push_job(&job->base);
+
+ return f;
+}
+
+int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
+ struct dma_fence **fence)
{
int r;
- job->ring = ring;
- if (!f)
- return -EINVAL;
+ job->base.sched = &ring->sched;
+ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job, fence);
- r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
if (r)
return r;
- job->owner = owner;
- job->fence_ctx = entity->fence_context;
- *f = dma_fence_get(&job->base.s_fence->finished);
- amdgpu_job_free_resources(job);
- amd_sched_entity_push_job(&job->base);
-
+ amdgpu_job_free(job);
return 0;
}
-static struct dma_fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
+static struct dma_fence *
+amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
{
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
struct amdgpu_job *job = to_amdgpu_job(sched_job);
- struct amdgpu_vm *vm = job->vm;
-
- struct dma_fence *fence = amdgpu_sync_get_fence(&job->dep_sync);
+ struct dma_fence *fence;
int r;
- if (amd_sched_dependency_optimized(fence, sched_job->s_entity)) {
- r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence);
- if (r)
- DRM_ERROR("Error adding fence to sync (%d)\n", r);
- }
- if (!fence)
- fence = amdgpu_sync_get_fence(&job->sync);
- while (fence == NULL && vm && !job->vm_id) {
- struct amdgpu_ring *ring = job->ring;
+ r = drm_sched_entity_error(s_entity);
+ if (r)
+ goto error;
- r = amdgpu_vm_grab_id(vm, ring, &job->sync,
- &job->base.s_fence->finished,
- job);
- if (r)
- DRM_ERROR("Error getting VM ID (%d)\n", r);
+ if (job->gang_submit) {
+ fence = amdgpu_device_switch_gang(ring->adev, job->gang_submit);
+ if (fence)
+ return fence;
+ }
- fence = amdgpu_sync_get_fence(&job->sync);
+ fence = amdgpu_device_enforce_isolation(ring->adev, ring, job);
+ if (fence)
+ return fence;
+
+ if (job->vm && !job->vmid) {
+ r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
+ if (r) {
+ dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r);
+ goto error;
+ }
+ return fence;
}
- return fence;
+ return NULL;
+
+error:
+ dma_fence_set_error(&job->base.s_fence->finished, r);
+ return NULL;
}
-static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job)
+static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
- struct dma_fence *fence = NULL;
+ struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
+ struct amdgpu_device *adev = ring->adev;
+ struct dma_fence *fence = NULL, *finished;
struct amdgpu_job *job;
- struct amdgpu_fpriv *fpriv = NULL;
- int r;
+ int r = 0;
- if (!sched_job) {
- DRM_ERROR("job is null\n");
- return NULL;
- }
job = to_amdgpu_job(sched_job);
-
- BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
+ finished = &job->base.s_fence->finished;
trace_amdgpu_sched_run_job(job);
- if (job->vm)
- fpriv = container_of(job->vm, struct amdgpu_fpriv, vm);
- /* skip ib schedule when vram is lost */
- if (fpriv && amdgpu_kms_vram_lost(job->adev, fpriv))
- DRM_ERROR("Skip scheduling IBs!\n");
- else {
- r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence);
+
+ /* Skip job if VRAM is lost and never resubmit gangs */
+ if (job->generation != amdgpu_vm_generation(adev, job->vm) ||
+ (job->job_run_counter && job->gang_submit))
+ dma_fence_set_error(finished, -ECANCELED);
+
+ if (finished->error < 0) {
+ dev_dbg(adev->dev, "Skip scheduling IBs in ring(%s)",
+ ring->name);
+ } else {
+ r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, job,
+ &fence);
if (r)
- DRM_ERROR("Error scheduling IBs (%d)\n", r);
+ dev_err(adev->dev,
+ "Error scheduling IBs (%d) in ring(%s)", r,
+ ring->name);
}
- /* if gpu reset, hw fence will be replaced here */
- dma_fence_put(job->fence);
- job->fence = dma_fence_get(fence);
+
+ job->job_run_counter++;
amdgpu_job_free_resources(job);
+
+ fence = r ? ERR_PTR(r) : fence;
return fence;
}
-const struct amd_sched_backend_ops amdgpu_sched_ops = {
- .dependency = amdgpu_job_dependency,
+/*
+ * This is a duplicate function from DRM scheduler sched_internal.h.
+ * Plan is to remove it when amdgpu_job_stop_all_jobs_on_sched is removed, due
+ * to the latter being incorrect and racy.
+ *
+ * See https://lore.kernel.org/amd-gfx/44edde63-7181-44fb-a4f7-94e50514f539@amd.com/
+ */
+static struct drm_sched_job *
+drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
+{
+ struct spsc_node *node;
+
+ node = spsc_queue_pop(&entity->job_queue);
+ if (!node)
+ return NULL;
+
+ return container_of(node, struct drm_sched_job, queue_node);
+}
+
+void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_job *s_job;
+ struct drm_sched_entity *s_entity = NULL;
+ int i;
+
+ /* Signal all jobs not yet scheduled */
+ for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
+ struct drm_sched_rq *rq = sched->sched_rq[i];
+ spin_lock(&rq->lock);
+ list_for_each_entry(s_entity, &rq->entities, list) {
+ while ((s_job = drm_sched_entity_queue_pop(s_entity))) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_signal(&s_fence->scheduled);
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+ }
+ spin_unlock(&rq->lock);
+ }
+
+ /* Signal all jobs already scheduled to HW */
+ list_for_each_entry(s_job, &sched->pending_list, list) {
+ struct drm_sched_fence *s_fence = s_job->s_fence;
+
+ dma_fence_set_error(&s_fence->finished, -EHWPOISON);
+ dma_fence_signal(&s_fence->finished);
+ }
+}
+
+const struct drm_sched_backend_ops amdgpu_sched_ops = {
+ .prepare_job = amdgpu_job_prepare_job,
.run_job = amdgpu_job_run,
.timedout_job = amdgpu_job_timedout,
.free_job = amdgpu_job_free_cb