Diffstat (limited to 'drivers/gpu/drm/msm/adreno/adreno_gpu.c')
-rw-r--r-- | drivers/gpu/drm/msm/adreno/adreno_gpu.c | 269
1 file changed, 198 insertions, 71 deletions
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 8090dde03280..1c80909e63ca 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -10,7 +10,7 @@
 #include <linux/interconnect.h>
 #include <linux/firmware/qcom/qcom_scm.h>
 #include <linux/kernel.h>
-#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/pm_opp.h>
 #include <linux/slab.h>
 #include <linux/soc/qcom/mdt_loader.h>
@@ -33,7 +33,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
 	struct device *dev = &gpu->pdev->dev;
 	const struct firmware *fw;
 	const char *signed_fwname = NULL;
-	struct device_node *np, *mem_np;
+	struct device_node *np;
 	struct resource r;
 	phys_addr_t mem_phys;
 	ssize_t mem_size;
@@ -46,23 +46,16 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
 	}
 
 	np = of_get_child_by_name(dev->of_node, "zap-shader");
-	if (!np) {
+	if (!of_device_is_available(np)) {
 		zap_available = false;
 		return -ENODEV;
 	}
 
-	mem_np = of_parse_phandle(np, "memory-region", 0);
-	of_node_put(np);
-	if (!mem_np) {
+	ret = of_reserved_mem_region_to_resource(np, 0, &r);
+	if (ret) {
 		zap_available = false;
-		return -EINVAL;
-	}
-
-	ret = of_address_to_resource(mem_np, 0, &r);
-	of_node_put(mem_np);
-	if (ret)
 		return ret;
-
+	}
 	mem_phys = r.start;
 
 	/*
@@ -99,7 +92,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
 		 * was a bad idea, and is only provided for backwards
 		 * compatibility for older targets.
 		 */
-		return -ENODEV;
+		return -ENOENT;
 	}
 
 	if (IS_ERR(fw)) {
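
The zap-shader hunks above replace the open-coded of_parse_phandle() + of_address_to_resource() sequence with the single of_reserved_mem_region_to_resource() helper, and use of_device_is_available() so a missing or disabled zap-shader node is tolerated. A minimal sketch of the same lookup pattern, assuming a hypothetical demo_get_carveout() wrapper (the OF helpers are the real APIs; everything else is invented for illustration):

	#include <linux/ioport.h>
	#include <linux/of.h>
	#include <linux/of_reserved_mem.h>

	/* Resolve a child node's first "memory-region" into a physical range. */
	static int demo_get_carveout(struct device *dev, phys_addr_t *phys,
				     size_t *size)
	{
		struct device_node *np;
		struct resource r;
		int ret;

		np = of_get_child_by_name(dev->of_node, "zap-shader");
		if (!of_device_is_available(np)) {	/* also handles np == NULL */
			of_node_put(np);
			return -ENODEV;
		}

		/* Looks up the "memory-region" phandle and converts it in one step */
		ret = of_reserved_mem_region_to_resource(np, 0, &r);
		of_node_put(np);
		if (ret)
			return ret;

		*phys = r.start;
		*size = resource_size(&r);
		return 0;
	}
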
@@ -191,25 +184,25 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
 	return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
 }
 
-struct msm_gem_address_space *
-adreno_create_address_space(struct msm_gpu *gpu,
-			    struct platform_device *pdev)
+struct drm_gpuvm *
+adreno_create_vm(struct msm_gpu *gpu,
+		 struct platform_device *pdev)
 {
-	return adreno_iommu_create_address_space(gpu, pdev, 0);
+	return adreno_iommu_create_vm(gpu, pdev, 0);
 }
 
-struct msm_gem_address_space *
-adreno_iommu_create_address_space(struct msm_gpu *gpu,
-				  struct platform_device *pdev,
-				  unsigned long quirks)
+struct drm_gpuvm *
+adreno_iommu_create_vm(struct msm_gpu *gpu,
+		       struct platform_device *pdev,
+		       unsigned long quirks)
 {
 	struct iommu_domain_geometry *geometry;
 	struct msm_mmu *mmu;
-	struct msm_gem_address_space *aspace;
+	struct drm_gpuvm *vm;
 	u64 start, size;
 
 	mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks);
-	if (IS_ERR_OR_NULL(mmu))
+	if (IS_ERR(mmu))
 		return ERR_CAST(mmu);
 
 	geometry = msm_iommu_get_geometry(mmu);
@@ -224,47 +217,95 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
 	start = max_t(u64, SZ_16M, geometry->aperture_start);
 	size = geometry->aperture_end - start + 1;
 
-	aspace = msm_gem_address_space_create(mmu, "gpu",
-		start & GENMASK_ULL(48, 0), size);
+	vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", start & GENMASK_ULL(48, 0),
+			       size, true);
 
-	if (IS_ERR(aspace) && !IS_ERR(mmu))
+	if (IS_ERR(vm) && !IS_ERR(mmu))
 		mmu->funcs->destroy(mmu);
 
-	return aspace;
+	return vm;
 }
 
-u64 adreno_private_address_space_size(struct msm_gpu *gpu)
+u64 adreno_private_vm_size(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(&gpu->pdev->dev);
+	const struct io_pgtable_cfg *ttbr1_cfg;
 
 	if (address_space_size)
 		return address_space_size;
 
-	if (adreno_gpu->info->address_space_size)
-		return adreno_gpu->info->address_space_size;
+	if (adreno_gpu->info->quirks & ADRENO_QUIRK_4GB_VA)
+		return SZ_4G;
+
+	if (!adreno_smmu || !adreno_smmu->get_ttbr1_cfg)
+		return SZ_4G;
+
+	ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+
+	/*
+	 * Userspace VM is actually using TTBR0, but both are the same size,
+	 * with b48 (sign bit) selecting which TTBRn to use.  So if IAS is
+	 * 48, the total (kernel+user) address space size is effectively
+	 * 49 bits.  But what userspace is in control of is the lower 48.
+	 */
+	return BIT(ttbr1_cfg->ias) - ADRENO_VM_START;
+}
+
+void adreno_check_and_reenable_stall(struct adreno_gpu *adreno_gpu)
+{
+	struct msm_gpu *gpu = &adreno_gpu->base;
+	struct msm_drm_private *priv = gpu->dev->dev_private;
+	unsigned long flags;
+
+	/*
+	 * Wait until the cooldown period has passed and we would actually
+	 * collect a crashdump to re-enable stall-on-fault.
+	 */
+	spin_lock_irqsave(&priv->fault_stall_lock, flags);
+	if (!priv->stall_enabled &&
+	    ktime_after(ktime_get(), priv->stall_reenable_time) &&
+	    !READ_ONCE(gpu->crashstate)) {
+		struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
+
+		priv->stall_enabled = true;
 
-	return SZ_4G;
+		mmu->funcs->set_stall(mmu, true);
+	}
+	spin_unlock_irqrestore(&priv->fault_stall_lock, flags);
 }
 
 #define ARM_SMMU_FSR_TF		BIT(1)
 #define ARM_SMMU_FSR_PF		BIT(3)
 #define ARM_SMMU_FSR_EF		BIT(4)
+#define ARM_SMMU_FSR_SS		BIT(30)
 
 int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
			 struct adreno_smmu_fault_info *info, const char *block,
			 u32 scratch[4])
 {
+	struct adreno_gpu *adreno_gpu = container_of(gpu, struct adreno_gpu, base);
+	struct msm_drm_private *priv = gpu->dev->dev_private;
+	struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;
 	const char *type = "UNKNOWN";
-	bool do_devcoredump = info && !READ_ONCE(gpu->crashstate);
+	bool do_devcoredump = info && (info->fsr & ARM_SMMU_FSR_SS) &&
+			      !READ_ONCE(gpu->crashstate);
+	unsigned long irq_flags;
 
 	/*
-	 * If we aren't going to be resuming later from fault_worker, then do
-	 * it now.
+	 * In case there is a subsequent storm of pagefaults, disable
+	 * stall-on-fault for at least half a second.
 	 */
-	if (!do_devcoredump) {
-		gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
+	spin_lock_irqsave(&priv->fault_stall_lock, irq_flags);
+	if (priv->stall_enabled) {
+		priv->stall_enabled = false;
+
+		mmu->funcs->set_stall(mmu, false);
 	}
+	priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500);
+	spin_unlock_irqrestore(&priv->fault_stall_lock, irq_flags);
+
 	/*
 	 * Print a default message if we couldn't get the data from the
 	 * adreno-smmu-priv
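
This hunk introduces a cooldown protocol for stall-on-fault: the fault handler (continued below) disables stalling and stamps stall_reenable_time half a second into the future, and adreno_check_and_reenable_stall() re-arms it only once that time has passed and no crash state is pending. A condensed sketch of the pattern, with invented demo_* names standing in for the fault_stall_lock-protected state in msm_drm_private:

	#include <linux/ktime.h>
	#include <linux/spinlock.h>

	struct demo_stall_state {
		spinlock_t lock;	/* protects enabled and reenable_time */
		bool enabled;
		ktime_t reenable_time;
	};

	/* Fault path: suppress stalls so a fault storm cannot wedge the SMMU */
	static void demo_stall_cooldown(struct demo_stall_state *s)
	{
		unsigned long flags;

		spin_lock_irqsave(&s->lock, flags);
		s->enabled = false;		/* hw set_stall(false) goes here */
		s->reenable_time = ktime_add_ms(ktime_get(), 500);
		spin_unlock_irqrestore(&s->lock, flags);
	}

	/* Safe (non-fault) context: re-arm once the cooldown has expired */
	static void demo_stall_maybe_reenable(struct demo_stall_state *s)
	{
		unsigned long flags;

		spin_lock_irqsave(&s->lock, flags);
		if (!s->enabled && ktime_after(ktime_get(), s->reenable_time))
			s->enabled = true;	/* hw set_stall(true) goes here */
		spin_unlock_irqrestore(&s->lock, flags);
	}
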
@@ -291,29 +332,41 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags,
			scratch[0], scratch[1], scratch[2], scratch[3]);
 
 	if (do_devcoredump) {
+		struct msm_gpu_fault_info fault_info = {};
+
 		/* Turn off the hangcheck timer to keep it from bothering us */
-		del_timer(&gpu->hangcheck_timer);
+		timer_delete(&gpu->hangcheck_timer);
+
+		/* Let any concurrent GMU transactions know that the MMU may be
+		 * blocked for a while and they should wait on us.
+		 */
+		reinit_completion(&adreno_gpu->fault_coredump_done);
 
-		gpu->fault_info.ttbr0 = info->ttbr0;
-		gpu->fault_info.iova = iova;
-		gpu->fault_info.flags = flags;
-		gpu->fault_info.type = type;
-		gpu->fault_info.block = block;
+		fault_info.ttbr0 = info->ttbr0;
+		fault_info.iova = iova;
+		fault_info.flags = flags;
+		fault_info.type = type;
+		fault_info.block = block;
 
-		kthread_queue_work(gpu->worker, &gpu->fault_work);
+		msm_gpu_fault_crashstate_capture(gpu, &fault_info);
+
+		complete_all(&adreno_gpu->fault_coredump_done);
 	}
 
 	return 0;
 }
 
-int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx,
		     uint32_t param, uint64_t *value, uint32_t *len)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct drm_device *drm = gpu->dev;
+	/* Note ctx can be NULL when called from rd_open(): */
+	struct drm_gpuvm *vm = ctx ? msm_context_vm(drm, ctx) : NULL;
 
 	/* No pointer params yet */
 	if (*len != 0)
-		return -EINVAL;
+		return UERR(EINVAL, drm, "invalid len");
 
 	switch (param) {
 	case MSM_PARAM_GPU_ID:
@@ -323,7 +376,11 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
 		*value = adreno_gpu->info->gmem;
 		return 0;
 	case MSM_PARAM_GMEM_BASE:
-		*value = !adreno_is_a650_family(adreno_gpu) ? 0x100000 : 0;
+		if (adreno_is_a650_family(adreno_gpu) ||
+		    adreno_is_a740_family(adreno_gpu))
+			*value = 0;
+		else
+			*value = 0x100000;
 		return 0;
 	case MSM_PARAM_CHIP_ID:
 		*value = adreno_gpu->chip_id;
@@ -351,8 +408,8 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
 		*value = 0;
 		return 0;
 	case MSM_PARAM_FAULTS:
-		if (ctx->aspace)
-			*value = gpu->global_faults + ctx->aspace->faults;
+		if (vm)
+			*value = gpu->global_faults + to_msm_vm(vm)->faults;
 		else
 			*value = gpu->global_faults;
 		return 0;
@@ -360,24 +417,43 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
 		*value = gpu->suspend_count;
 		return 0;
 	case MSM_PARAM_VA_START:
-		if (ctx->aspace == gpu->aspace)
-			return -EINVAL;
-		*value = ctx->aspace->va_start;
+		if (vm == gpu->vm)
+			return UERR(EINVAL, drm, "requires per-process pgtables");
+		*value = vm->mm_start;
 		return 0;
 	case MSM_PARAM_VA_SIZE:
-		if (ctx->aspace == gpu->aspace)
-			return -EINVAL;
-		*value = ctx->aspace->va_size;
+		if (vm == gpu->vm)
+			return UERR(EINVAL, drm, "requires per-process pgtables");
+		*value = vm->mm_range;
+		return 0;
+	case MSM_PARAM_HIGHEST_BANK_BIT:
+		*value = adreno_gpu->ubwc_config->highest_bank_bit;
+		return 0;
+	case MSM_PARAM_RAYTRACING:
+		*value = adreno_gpu->has_ray_tracing;
+		return 0;
+	case MSM_PARAM_UBWC_SWIZZLE:
+		*value = adreno_gpu->ubwc_config->ubwc_swizzle;
+		return 0;
+	case MSM_PARAM_MACROTILE_MODE:
+		*value = adreno_gpu->ubwc_config->macrotile_mode;
+		return 0;
+	case MSM_PARAM_UCHE_TRAP_BASE:
+		*value = adreno_gpu->uche_trap_base;
+		return 0;
+	case MSM_PARAM_HAS_PRR:
+		*value = adreno_smmu_has_prr(gpu);
 		return 0;
 	default:
-		DBG("%s: invalid param: %u", gpu->name, param);
-		return -EINVAL;
+		return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
 	}
 }
 
-int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
+int adreno_set_param(struct msm_gpu *gpu, struct msm_context *ctx,
		     uint32_t param, uint64_t value, uint32_t len)
 {
+	struct drm_device *drm = gpu->dev;
+
 	switch (param) {
 	case MSM_PARAM_COMM:
 	case MSM_PARAM_CMDLINE:
@@ -385,11 +461,11 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
		 * that should be a reasonable upper bound
		 */
 		if (len > PAGE_SIZE)
-			return -EINVAL;
+			return UERR(EINVAL, drm, "invalid len");
 		break;
 	default:
 		if (len != 0)
-			return -EINVAL;
+			return UERR(EINVAL, drm, "invalid len");
 	}
 
 	switch (param) {
@@ -418,11 +494,25 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
 	}
 	case MSM_PARAM_SYSPROF:
 		if (!capable(CAP_SYS_ADMIN))
-			return -EPERM;
-		return msm_file_private_set_sysprof(ctx, gpu, value);
+			return UERR(EPERM, drm, "invalid permissions");
+		return msm_context_set_sysprof(ctx, gpu, value);
+	case MSM_PARAM_EN_VM_BIND:
+		/* We can only support VM_BIND with per-process pgtables: */
+		if (ctx->vm == gpu->vm)
+			return UERR(EINVAL, drm, "requires per-process pgtables");
+
+		/*
+		 * We can only switch to VM_BIND mode if the VM has not yet
+		 * been created:
+		 */
+		if (ctx->vm)
+			return UERR(EBUSY, drm, "VM already created");
+
+		ctx->userspace_managed_vm = value;
+
+		return 0;
 	default:
-		DBG("%s: invalid param: %u", gpu->name, param);
-		return -EINVAL;
+		return UERR(EINVAL, drm, "%s: invalid param: %u", gpu->name, param);
 	}
 }
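
The fault_coredump_done completion added in this hunk is a gate around the crash-state capture: the handler re-initializes it before the (slow) capture runs while the MMU may still be stalled, and completes it afterwards, so concurrent GMU transactions can wait on it instead of timing out against a blocked MMU. Note that the adreno_gpu_init() hunk near the end creates the completion pre-completed, so waiters never block when no fault is in flight. A minimal sketch of the handshake, with hypothetical demo_* names:

	#include <linux/completion.h>

	static struct completion demo_done;

	static void demo_init(void)
	{
		init_completion(&demo_done);
		complete_all(&demo_done);	/* start out "idle" */
	}

	/* Fault path: close the gate, capture, reopen */
	static void demo_capture(void)
	{
		reinit_completion(&demo_done);
		/* ... crash-state capture while the MMU may be stalled ... */
		complete_all(&demo_done);
	}

	/* Concurrent path (e.g. a GMU transaction): wait for any capture */
	static void demo_wait_for_capture(void)
	{
		wait_for_completion(&demo_done);
	}
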
@@ -468,7 +558,7 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
 	ret = request_firmware_direct(&fw, fwname, drm->dev);
 	if (!ret) {
 		DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
-			newname);
+			fwname);
 		adreno_gpu->fwloc = FW_LOCATION_LEGACY;
 		goto out;
 	} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
@@ -517,7 +607,7 @@ int adreno_load_fw(struct adreno_gpu *adreno_gpu)
 		if (!adreno_gpu->info->fw[i])
 			continue;
 
-		/* Skip loading GMU firwmare with GMU Wrapper */
+		/* Skip loading GMU firmware with GMU Wrapper */
 		if (adreno_has_gmu_wrapper(adreno_gpu) && i == ADRENO_FW_GMU)
 			continue;
 
@@ -542,7 +632,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
 	void *ptr;
 
 	ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
-		MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
+		MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->vm, &bo, iova);
 
 	if (IS_ERR(ptr))
 		return ERR_CAST(ptr);
@@ -556,8 +646,19 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
 
 int adreno_hw_init(struct msm_gpu *gpu)
 {
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	int ret;
+
 	VERB("%s", gpu->name);
 
+	if (adreno_gpu->info->family >= ADRENO_6XX_GEN1 &&
+	    qcom_scm_set_gpu_smmu_aperture_is_available()) {
+		/* We currently always use context bank 0, so hard code this */
+		ret = qcom_scm_set_gpu_smmu_aperture(0);
+		if (ret)
+			DRM_DEV_ERROR(gpu->dev->dev, "unable to set SMMU aperture: %d\n", ret);
+	}
+
 	for (int i = 0; i < gpu->nr_rings; i++) {
 		struct msm_ringbuffer *ring = gpu->rb[i];
 
@@ -567,6 +668,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
 		ring->cur = ring->start;
 		ring->next = ring->start;
 		ring->memptrs->rptr = 0;
+		ring->memptrs->bv_fence = ring->fctx->completed_fence;
 
 		/* Detect and clean up an impossible fence, ie. if GPU managed
		 * to scribble something invalid, we don't want that to confuse
@@ -677,11 +779,9 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
 			size = j + 1;
 
 		if (size) {
-			state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
-			if (state->ring[i].data) {
-				memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
+			state->ring[i].data = kvmemdup(gpu->rb[i]->start, size << 2, GFP_KERNEL);
+			if (state->ring[i].data)
 				state->ring[i].data_size = size << 2;
-			}
 		}
 	}
 
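
The ring-snapshot hunk above replaces an open-coded kvmalloc() + memcpy() with kvmemdup(), which has the same NULL-on-failure contract, so the guarded data_size assignment behaves exactly as before. Roughly, the two forms are equivalent:

	/* before: allocate, then copy by hand */
	buf = kvmalloc(len, GFP_KERNEL);
	if (buf)
		memcpy(buf, src, len);

	/* after: one call, NULL on allocation failure */
	buf = kvmemdup(src, len, GFP_KERNEL);
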
@@ -725,6 +825,7 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state)
 	for (i = 0; state->bos && i < state->nr_bos; i++)
 		kvfree(state->bos[i].data);
 
+	kfree(state->vm_logs);
 	kfree(state->bos);
 	kfree(state->comm);
 	kfree(state->cmd);
@@ -853,6 +954,26 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
 		drm_printf(p, "  - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ");
 		drm_printf(p, "  - type=%s\n", info->type);
 		drm_printf(p, "  - source=%s\n", info->block);
+
+		/* Information extracted from what we think are the current
+		 * pgtables.  Hopefully the TTBR0 matches what we've extracted
+		 * from the SMMU registers in smmu_info!
+		 */
+		drm_puts(p, "pgtable-fault-info:\n");
+		drm_printf(p, "  - ttbr0: %.16llx\n", (u64)info->pgtbl_ttbr0);
+		drm_printf(p, "  - asid: %d\n", info->asid);
+		drm_printf(p, "  - ptes: %.16llx %.16llx %.16llx %.16llx\n",
+			   info->ptes[0], info->ptes[1], info->ptes[2], info->ptes[3]);
+	}
+
+	if (state->vm_logs) {
+		drm_puts(p, "vm-log:\n");
+		for (i = 0; i < state->nr_vm_logs; i++) {
+			struct msm_gem_vm_log_entry *e = &state->vm_logs[i];
+			drm_printf(p, "  - %s:%d: 0x%016llx-0x%016llx\n",
+				   e->op, e->queue_id, e->iova,
+				   e->iova + e->range);
+		}
 	}
 
 	drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
@@ -879,6 +1000,7 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
 
			drm_printf(p, "    - iova: 0x%016llx\n", state->bos[i].iova);
			drm_printf(p, "      size: %zd\n", state->bos[i].size);
+			drm_printf(p, "      flags: 0x%x\n", state->bos[i].flags);
			drm_printf(p, "      name: %-32s\n", state->bos[i].name);
 
			adreno_show_object(p, &state->bos[i].data,
@@ -1071,9 +1193,11 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	adreno_gpu->chip_id = config->chip_id;
 
 	gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1;
+	gpu->pdev = pdev;
 
 	/* Only handle the core clock when GMU is not in use (or is absent). */
 	if (adreno_has_gmu_wrapper(adreno_gpu) ||
+	    adreno_has_rgmu(adreno_gpu) ||
	    adreno_gpu->info->family < ADRENO_6XX_GEN1) {
		/*
		 * This can only be done before devm_pm_opp_of_add_table(), or
@@ -1107,6 +1231,9 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	if (ret)
 		return ret;
 
+	init_completion(&adreno_gpu->fault_coredump_done);
+	complete_all(&adreno_gpu->fault_coredump_done);
+
 	pm_runtime_set_autosuspend_delay(dev,
		adreno_gpu->info->inactive_period);
 	pm_runtime_use_autosuspend(dev);
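
With these hooks in place, a devcoredump taken on a page fault now carries the pgtable and VM-log sections printed by adreno_show() above. A hypothetical excerpt, constructed purely from the format strings in this patch (all values are made up):

	pgtable-fault-info:
	  - ttbr0: 0000000080cd4000
	  - asid: 5
	  - ptes: 0000000000000000 0068000081001f47 0068000081002f47 0068000081003f47
	vm-log:
	  - map:1: 0x0000000100000000-0x0000000100010000
	  - unmap:1: 0x0000000100200000-0x0000000100201000
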
