Diffstat (limited to 'drivers/gpu/drm/etnaviv/etnaviv_gpu.c')
 -rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 472
 1 file changed, 293 insertions(+), 179 deletions(-)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 37018bc55810..ca0be293f5fe 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -8,13 +8,16 @@
 #include <linux/delay.h>
 #include <linux/dma-fence.h>
 #include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
+#include <linux/reset.h>
 #include <linux/thermal.h>

+#include <drm/drm_print.h>
+
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_dump.h"
 #include "etnaviv_gpu.h"
@@ -172,10 +175,35 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
     return 0;
 }

+static int etnaviv_gpu_reset_deassert(struct etnaviv_gpu *gpu)
+{
+    int ret;
+
+    /*
+     * 32 core clock cycles (slowest clock) required before deassertion
+     * 1 microsecond might match all implementations without computation
+     */
+    usleep_range(1, 2);
+
+    ret = reset_control_deassert(gpu->rst);
+    if (ret)
+        return ret;
+
+    /*
+     * 128 core clock cycles (slowest clock) required before any activity on AHB
+     * 1 microsecond might match all implementations without computation
+     */
+    usleep_range(1, 2);
+
+    return 0;
+}
+
+static inline bool etnaviv_is_model_rev(struct etnaviv_gpu *gpu, u32 model, u32 revision)
+{
+    return gpu->identity.model == model &&
+           gpu->identity.revision == revision;
+}

-#define etnaviv_is_model_rev(gpu, mod, rev) \
-    ((gpu)->identity.model == chipModel_##mod && \
-     (gpu)->identity.revision == rev)
 #define etnaviv_field(val, field) \
     (((val) & field##__MASK) >> field##__SHIFT)

@@ -281,7 +309,7 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)

     switch (gpu->identity.instruction_count) {
     case 0:
-        if (etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
+        if (etnaviv_is_model_rev(gpu, 0x2000, 0x5108) ||
             gpu->identity.model == chipModel_GC880)
             gpu->identity.instruction_count = 512;
         else
@@ -315,17 +343,17 @@ static void etnaviv_hw_specs(struct etnaviv_gpu *gpu)
      * For some cores, two varyings are consumed for position, so the
      * maximum varying count needs to be reduced by one.
      */
-    if (etnaviv_is_model_rev(gpu, GC5000, 0x5434) ||
-        etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
-        etnaviv_is_model_rev(gpu, GC4000, 0x5245) ||
-        etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
-        etnaviv_is_model_rev(gpu, GC3000, 0x5435) ||
-        etnaviv_is_model_rev(gpu, GC2200, 0x5244) ||
-        etnaviv_is_model_rev(gpu, GC2100, 0x5108) ||
-        etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
-        etnaviv_is_model_rev(gpu, GC1500, 0x5246) ||
-        etnaviv_is_model_rev(gpu, GC880, 0x5107) ||
-        etnaviv_is_model_rev(gpu, GC880, 0x5106))
+    if (etnaviv_is_model_rev(gpu, 0x5000, 0x5434) ||
+        etnaviv_is_model_rev(gpu, 0x4000, 0x5222) ||
+        etnaviv_is_model_rev(gpu, 0x4000, 0x5245) ||
+        etnaviv_is_model_rev(gpu, 0x4000, 0x5208) ||
+        etnaviv_is_model_rev(gpu, 0x3000, 0x5435) ||
+        etnaviv_is_model_rev(gpu, 0x2200, 0x5244) ||
+        etnaviv_is_model_rev(gpu, 0x2100, 0x5108) ||
+        etnaviv_is_model_rev(gpu, 0x2000, 0x5108) ||
+        etnaviv_is_model_rev(gpu, 0x1500, 0x5246) ||
+        etnaviv_is_model_rev(gpu, 0x880, 0x5107) ||
+        etnaviv_is_model_rev(gpu, 0x880, 0x5106))
         gpu->identity.varyings_count -= 1;
 }

@@ -351,7 +379,7 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
      * Reading these two registers on GC600 rev 0x19 result in a
      * unhandled fault: external abort on non-linefetch
      */
-    if (!etnaviv_is_model_rev(gpu, GC600, 0x19)) {
+    if (!etnaviv_is_model_rev(gpu, 0x600, 0x19)) {
         gpu->identity.product_id = gpu_read(gpu, VIVS_HI_CHIP_PRODUCT_ID);
         gpu->identity.eco_id = gpu_read(gpu, VIVS_HI_CHIP_ECO_ID);
     }
@@ -368,7 +396,7 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
     }

     /* Another special case */
-    if (etnaviv_is_model_rev(gpu, GC300, 0x2201)) {
+    if (etnaviv_is_model_rev(gpu, 0x300, 0x2201)) {
         u32 chipTime = gpu_read(gpu, VIVS_HI_CHIP_TIME);

         if (chipDate == 0x20080814 && chipTime == 0x12051100) {
@@ -387,15 +415,15 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
      * Fix model/rev here, so all other places can refer to this
      * core by its real identity.
      */
-    if (etnaviv_is_model_rev(gpu, GC2000, 0xffff5450)) {
+    if (etnaviv_is_model_rev(gpu, 0x2000, 0xffff5450)) {
         gpu->identity.model = chipModel_GC3000;
         gpu->identity.revision &= 0xffff;
     }

-    if (etnaviv_is_model_rev(gpu, GC1000, 0x5037) && (chipDate == 0x20120617))
+    if (etnaviv_is_model_rev(gpu, 0x1000, 0x5037) && (chipDate == 0x20120617))
         gpu->identity.eco_id = 1;

-    if (etnaviv_is_model_rev(gpu, GC320, 0x5303) && (chipDate == 0x20140511))
+    if (etnaviv_is_model_rev(gpu, 0x320, 0x5303) && (chipDate == 0x20140511))
         gpu->identity.eco_id = 1;
 }

@@ -416,6 +444,12 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
     if (gpu->identity.model == chipModel_GC700)
         gpu->identity.features &= ~chipFeatures_FAST_CLEAR;

+    /* These models/revisions don't have the 2D pipe bit */
+    if ((gpu->identity.model == chipModel_GC500 &&
+         gpu->identity.revision <= 2) ||
+        gpu->identity.model == chipModel_GC300)
+        gpu->identity.features |= chipFeatures_PIPE_2D;
+
     if ((gpu->identity.model == chipModel_GC500 &&
          gpu->identity.revision < 2) ||
         (gpu->identity.model == chipModel_GC300 &&
@@ -449,8 +483,9 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
             gpu_read(gpu, VIVS_HI_CHIP_MINOR_FEATURE_5);
     }

-    /* GC600 idle register reports zero bits where modules aren't present */
-    if (gpu->identity.model == chipModel_GC600)
+    /* GC600/300 idle register reports zero bits where modules aren't present */
+    if (gpu->identity.model == chipModel_GC600 ||
+        gpu->identity.model == chipModel_GC300)
         gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
                  VIVS_HI_IDLE_STATE_RA |
                  VIVS_HI_IDLE_STATE_SE |
@@ -486,6 +521,14 @@ static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
         clock |= VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
         etnaviv_gpu_load_clock(gpu, clock);
     }
+
+    /*
+     * Choose number of wait cycles to target a ~30us (1/32768) max latency
+     * until new work is picked up by the FE when it polls in the idle loop.
+     * If the GPU base frequency is unknown use 200 wait cycles.
+     */
+    gpu->fe_waitcycles = clamp(gpu->base_rate_core >> (15 - gpu->freq_scale),
+                               200UL, 0xffffUL);
 }

 static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
@@ -498,8 +541,19 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
     timeout = jiffies + msecs_to_jiffies(1000);

     while (time_is_after_jiffies(timeout)) {
-        /* enable clock */
         unsigned int fscale = 1 << (6 - gpu->freq_scale);
+        u32 pulse_eater = 0x01590880;
+
+        /* disable clock gating */
+        gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, 0x0);
+
+        /* disable pulse eater */
+        pulse_eater |= BIT(17);
+        gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
+        pulse_eater |= BIT(0);
+        gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
+
+        /* enable clock */
         control = VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
         etnaviv_gpu_load_clock(gpu, control);

@@ -546,8 +600,8 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
             continue;
         }

-        /* disable debug registers, as they are not normally needed */
-        control |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
+        /* enable debug register access */
+        control &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
         gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, control);

         failed = false;
@@ -569,7 +623,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
     /* We rely on the GPU running, so program the clock */
     etnaviv_gpu_update_clock(gpu);

-    gpu->fe_running = false;
+    gpu->state = ETNA_GPU_STATE_RESET;
     gpu->exec_state = -1;
     if (gpu->mmu_context)
         etnaviv_iommu_context_put(gpu->mmu_context);
@@ -583,7 +637,7 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
     u32 pmc, ppc;

     /* enable clock gating */
-    ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+    ppc = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
     ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;

     /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
@@ -591,9 +645,9 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
         gpu->identity.revision == 0x4302)
         ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;

-    gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
+    gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, ppc);

-    pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
+    pmc = gpu_read_power(gpu, VIVS_PM_MODULE_CONTROLS);

     /* Disable PA clock gating for GC400+ without bugfix except for GC420 */
     if (gpu->identity.model >= chipModel_GC400 &&
@@ -615,20 +669,27 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
         pmc |= BIT(15); /* Unknown bit */

     /* Disable TX clock gating on affected core revisions. */
-    if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
-        etnaviv_is_model_rev(gpu, GC2000, 0x5108))
+    if (etnaviv_is_model_rev(gpu, 0x4000, 0x5222) ||
+        etnaviv_is_model_rev(gpu, 0x2000, 0x5108) ||
+        etnaviv_is_model_rev(gpu, 0x7000, 0x6202) ||
+        etnaviv_is_model_rev(gpu, 0x7000, 0x6203))
         pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;

-    /* Disable SE, RA and TX clock gating on affected core revisions. */
-    if (etnaviv_is_model_rev(gpu, GC7000, 0x6202))
+    /* Disable SE and RA clock gating on affected core revisions. */
+    if (etnaviv_is_model_rev(gpu, 0x7000, 0x6202))
         pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE |
-               VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA |
-               VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
+               VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA;
+
+    /* Disable SH_EU clock gating on affected core revisions. */
+    if (etnaviv_is_model_rev(gpu, 0x8000, 0x7200) ||
+        etnaviv_is_model_rev(gpu, 0x8000, 0x8002) ||
+        etnaviv_is_model_rev(gpu, 0x9200, 0x6304))
+        pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SH_EU;

     pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
     pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;

-    gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
+    gpu_write_power(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
 }

 void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
@@ -643,8 +704,6 @@ void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
               VIVS_MMUv2_SEC_COMMAND_CONTROL_ENABLE |
               VIVS_MMUv2_SEC_COMMAND_CONTROL_PREFETCH(prefetch));
     }
-
-    gpu->fe_running = true;
 }

 static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
@@ -653,6 +712,8 @@ static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
     u16 prefetch;
     u32 address;

+    WARN_ON(gpu->state != ETNA_GPU_STATE_INITIALIZED);
+
     /* setup the MMU */
     etnaviv_iommu_restore(gpu, context);

@@ -662,6 +723,8 @@ static void etnaviv_gpu_start_fe_idleloop(struct etnaviv_gpu *gpu,
                     &gpu->mmu_context->cmdbuf_mapping);

     etnaviv_gpu_start_fe(gpu, address, prefetch);
+
+    gpu->state = ETNA_GPU_STATE_RUNNING;
 }

 static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
@@ -672,14 +735,14 @@ static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
      */
     u32 pulse_eater = 0x01590880;

-    if (etnaviv_is_model_rev(gpu, GC4000, 0x5208) ||
-        etnaviv_is_model_rev(gpu, GC4000, 0x5222)) {
+    if (etnaviv_is_model_rev(gpu, 0x4000, 0x5208) ||
+        etnaviv_is_model_rev(gpu, 0x4000, 0x5222)) {
         pulse_eater |= BIT(23);

     }

-    if (etnaviv_is_model_rev(gpu, GC1000, 0x5039) ||
-        etnaviv_is_model_rev(gpu, GC1000, 0x5040)) {
+    if (etnaviv_is_model_rev(gpu, 0x1000, 0x5039) ||
+        etnaviv_is_model_rev(gpu, 0x1000, 0x5040)) {
         pulse_eater &= ~BIT(16);
         pulse_eater |= BIT(17);
     }
@@ -688,17 +751,20 @@ static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
         (gpu->identity.features & chipFeatures_PIPE_3D)) {
         /* Performance fix: disable internal DFS */
-        pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
+        pulse_eater = gpu_read_power(gpu, VIVS_PM_PULSE_EATER);
         pulse_eater |= BIT(18);
     }

-    gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
+    gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
 }

 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 {
-    if ((etnaviv_is_model_rev(gpu, GC320, 0x5007) ||
-        etnaviv_is_model_rev(gpu, GC320, 0x5220)) &&
+    WARN_ON(!(gpu->state == ETNA_GPU_STATE_IDENTIFIED ||
+              gpu->state == ETNA_GPU_STATE_RESET));
+
+    if ((etnaviv_is_model_rev(gpu, 0x320, 0x5007) ||
+        etnaviv_is_model_rev(gpu, 0x320, 0x5220)) &&
         gpu_read(gpu, VIVS_HI_CHIP_TIME) != 0x2062400) {
         u32 mc_memory_debug;

@@ -724,7 +790,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
           VIVS_HI_AXI_CONFIG_ARCACHE(2));

     /* GC2000 rev 5108 needs a special bus config */
-    if (etnaviv_is_model_rev(gpu, GC2000, 0x5108)) {
+    if (etnaviv_is_model_rev(gpu, 0x2000, 0x5108)) {
         u32 bus_config = gpu_read(gpu, VIVS_MC_BUS_CONFIG);
         bus_config &= ~(VIVS_MC_BUS_CONFIG_FE_BUS_CONFIG__MASK |
                 VIVS_MC_BUS_CONFIG_TX_BUS_CONFIG__MASK);
@@ -743,6 +809,8 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
     etnaviv_gpu_setup_pulse_eater(gpu);

     gpu_write(gpu, VIVS_HI_INTR_ENBL, ~0U);
+
+    gpu->state = ETNA_GPU_STATE_INITIALIZED;
 }

 int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
@@ -757,6 +825,12 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
         goto pm_put;
     }

+    ret = etnaviv_gpu_reset_deassert(gpu);
+    if (ret) {
+        dev_err(gpu->dev, "GPU reset deassert failed\n");
+        goto fail;
+    }
+
     etnaviv_hw_identify(gpu);

     if (gpu->identity.model == 0) {
@@ -765,6 +839,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
         goto fail;
     }

+    if (gpu->identity.nn_core_count > 0)
+        dev_warn(gpu->dev, "etnaviv has been instantiated on a NPU, "
+                           "for which the UAPI is still experimental\n");
+
     /* Exclude VG cores with FE2.0 */
     if (gpu->identity.features & chipFeatures_PIPE_VG &&
         gpu->identity.features & chipFeatures_FE20) {
@@ -781,6 +859,8 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
         (gpu->identity.minor_features10 & chipMinorFeatures10_SECURITY_AHB))
         gpu->sec_mode = ETNA_SEC_KERNEL;

+    gpu->state = ETNA_GPU_STATE_IDENTIFIED;
+
     ret = etnaviv_hw_reset(gpu);
     if (ret) {
         dev_err(gpu->dev, "GPU reset failed\n");
@@ -791,17 +871,8 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
     if (ret)
         goto fail;

-    /*
-     * If the GPU is part of a system with DMA addressing limitations,
-     * request pages for our SHM backend buffers from the DMA32 zone to
-     * hopefully avoid performance killing SWIOTLB bounce buffering.
-     */
-    if (dma_addressing_limited(gpu->dev))
-        priv->shm_gfp_mask |= GFP_DMA32;
-
     /* Create buffer: */
-    ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer,
-                  PAGE_SIZE);
+    ret = etnaviv_cmdbuf_init(priv->cmdbuf_suballoc, &gpu->buffer, SZ_4K);
     if (ret) {
         dev_err(gpu->dev, "could not create command buffer\n");
         goto fail;
@@ -815,12 +886,15 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
      *
      * On MC1.0 cores the linear window offset is ignored by the TS engine,
      * leading to inconsistent memory views. Avoid using the offset on those
-     * cores if possible, otherwise disable the TS feature.
+     * cores if possible, otherwise disable the TS feature. MMUv2 doesn't
+     * expose this issue, as all TS accesses are MMU translated, so the
+     * linear window offset won't be used.
      */
     cmdbuf_paddr = ALIGN_DOWN(etnaviv_cmdbuf_get_pa(&gpu->buffer), SZ_128M);

     if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
-        (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
+        (gpu->identity.minor_features0 & chipMinorFeatures0_MC20) ||
+        (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
         if (cmdbuf_paddr >= SZ_2G)
             priv->mmu_global->memory_base = SZ_2G;
         else
@@ -847,8 +921,6 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
     pm_runtime_mark_last_busy(gpu->dev);
     pm_runtime_put_autosuspend(gpu->dev);

-    gpu->initialized = true;
-
     return 0;

 fail:
@@ -949,6 +1021,8 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
            gpu->identity.vertex_cache_size);
     seq_printf(m, "\t shader_core_count: %d\n",
            gpu->identity.shader_core_count);
+    seq_printf(m, "\t nn_core_count: %d\n",
+           gpu->identity.nn_core_count);
     seq_printf(m, "\t pixel_pipes: %d\n",
            gpu->identity.pixel_pipes);
     seq_printf(m, "\t vertex_output_buffer_size: %d\n",
@@ -1045,34 +1119,6 @@ pm_put:
 }
 #endif

-void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
-{
-    unsigned int i;
-
-    dev_err(gpu->dev, "recover hung GPU!\n");
-
-    if (pm_runtime_get_sync(gpu->dev) < 0)
-        goto pm_put;
-
-    mutex_lock(&gpu->lock);
-
-    etnaviv_hw_reset(gpu);
-
-    /* complete all events, the GPU won't do it after the reset */
-    spin_lock(&gpu->event_spinlock);
-    for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
-        complete(&gpu->event_free);
-    bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
-    spin_unlock(&gpu->event_spinlock);
-
-    etnaviv_gpu_hw_init(gpu);
-
-    mutex_unlock(&gpu->lock);
-    pm_runtime_mark_last_busy(gpu->dev);
-pm_put:
-    pm_runtime_put_autosuspend(gpu->dev);
-}
-
 /* fence object management */
 struct etnaviv_fence {
     struct etnaviv_gpu *gpu;
@@ -1153,20 +1199,22 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,
             unsigned int *events)
 {
     unsigned long timeout = msecs_to_jiffies(10 * 10000);
-    unsigned i, acquired = 0;
+    unsigned i, acquired = 0, rpm_count = 0;
+    int ret;

     for (i = 0; i < nr_events; i++) {
-        unsigned long ret;
+        unsigned long remaining;

-        ret = wait_for_completion_timeout(&gpu->event_free, timeout);
+        remaining = wait_for_completion_timeout(&gpu->event_free, timeout);

-        if (!ret) {
+        if (!remaining) {
             dev_err(gpu->dev, "wait_for_completion_timeout failed");
+            ret = -EBUSY;
             goto out;
         }

         acquired++;
-        timeout = ret;
+        timeout = remaining;
     }
@@ -1181,13 +1229,23 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events,

     spin_unlock(&gpu->event_spinlock);

+    for (i = 0; i < nr_events; i++) {
+        ret = pm_runtime_resume_and_get(gpu->dev);
+        if (ret)
+            goto out_rpm;
+        rpm_count++;
+    }
+
     return 0;

+out_rpm:
+    for (i = 0; i < rpm_count; i++)
+        pm_runtime_put_autosuspend(gpu->dev);
 out:
     for (i = 0; i < acquired; i++)
         complete(&gpu->event_free);

-    return -EBUSY;
+    return ret;
 }

 static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
@@ -1199,6 +1257,8 @@ static void event_free(struct etnaviv_gpu *gpu, unsigned int event)
         clear_bit(event, gpu->event_bitmap);
         complete(&gpu->event_free);
     }
+
+    pm_runtime_put_autosuspend(gpu->dev);
 }

 /*
@@ -1216,7 +1276,7 @@ int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
      * pretends we didn't find a fence in that case.
      */
     rcu_read_lock();
-    fence = idr_find(&gpu->fence_idr, id);
+    fence = xa_load(&gpu->user_fences, id);
     if (fence)
         fence = dma_fence_get_rcu(fence);
     rcu_read_unlock();
@@ -1293,17 +1353,16 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
 {
     u32 val;

+    mutex_lock(&gpu->lock);
+
     /* disable clock gating */
-    val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+    val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
     val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
-    gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
-
-    /* enable debug register */
-    val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
-    val &= ~VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
-    gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
+    gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);

     sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
+
+    mutex_unlock(&gpu->lock);
 }

 static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
@@ -1313,23 +1372,22 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
     unsigned int i;
     u32 val;

+    mutex_lock(&gpu->lock);
+
     sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);

+    /* enable clock gating */
+    val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
+    val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
+    gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
+
+    mutex_unlock(&gpu->lock);
+
     for (i = 0; i < submit->nr_pmrs; i++) {
         const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;

         *pmr->bo_vma = pmr->sequence;
     }
-
-    /* disable debug register */
-    val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
-    val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
-    gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
-
-    /* enable clock gating */
-    val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
-    val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
-    gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
 }
@@ -1341,15 +1399,6 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
     unsigned int i, nr_events = 1, event[3];
     int ret;

-    if (!submit->runtime_resumed) {
-        ret = pm_runtime_get_sync(gpu->dev);
-        if (ret < 0) {
-            pm_runtime_put_noidle(gpu->dev);
-            return NULL;
-        }
-        submit->runtime_resumed = true;
-    }
-
     /*
      * if there are performance monitor requests we need to have
      * - a sync point to re-configure gpu and process ETNA_PM_PROCESS_PRE
@@ -1377,7 +1426,7 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
         goto out_unlock;
     }

-    if (!gpu->fe_running)
+    if (gpu->state == ETNA_GPU_STATE_INITIALIZED)
         etnaviv_gpu_start_fe_idleloop(gpu, submit->mmu_context);

     if (submit->prev_mmu_context)
@@ -1424,8 +1473,60 @@ static void sync_point_worker(struct work_struct *work)
     etnaviv_gpu_start_fe(gpu, addr + 2, 2);
 }

+void etnaviv_gpu_recover_hang(struct etnaviv_gem_submit *submit)
+{
+    struct etnaviv_gpu *gpu = submit->gpu;
+    char *comm = NULL, *cmd = NULL;
+    struct task_struct *task;
+    unsigned int i;
+
+    dev_err(gpu->dev, "recover hung GPU!\n");
+
+    task = get_pid_task(submit->pid, PIDTYPE_PID);
+    if (task) {
+        comm = kstrdup(task->comm, GFP_KERNEL);
+        cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+        put_task_struct(task);
+    }
+
+    if (comm && cmd)
+        dev_err(gpu->dev, "offending task: %s (%s)\n", comm, cmd);
+
+    kfree(cmd);
+    kfree(comm);
+
+    if (pm_runtime_get_sync(gpu->dev) < 0)
+        goto pm_put;
+
+    mutex_lock(&gpu->lock);
+
+    etnaviv_hw_reset(gpu);
+
+    /* complete all events, the GPU won't do it after the reset */
+    spin_lock(&gpu->event_spinlock);
+    for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
+        event_free(gpu, i);
+    spin_unlock(&gpu->event_spinlock);
+
+    etnaviv_gpu_hw_init(gpu);
+
+    mutex_unlock(&gpu->lock);
+    pm_runtime_mark_last_busy(gpu->dev);
+pm_put:
+    pm_runtime_put_autosuspend(gpu->dev);
+}
+
 static void dump_mmu_fault(struct etnaviv_gpu *gpu)
 {
+    static const char *fault_reasons[] = {
+        "slave not present",
+        "page not present",
+        "write violation",
+        "out of bounds",
+        "read security violation",
+        "write security violation",
+    };
+
     u32 status_reg, status;
     int i;
@@ -1438,18 +1539,25 @@ static void dump_mmu_fault(struct etnaviv_gpu *gpu)
     dev_err_ratelimited(gpu->dev, "MMU fault status 0x%08x\n", status);

     for (i = 0; i < 4; i++) {
+        const char *reason = "unknown";
         u32 address_reg;
+        u32 mmu_status;

-        if (!(status & (VIVS_MMUv2_STATUS_EXCEPTION0__MASK << (i * 4))))
+        mmu_status = (status >> (i * 4)) & VIVS_MMUv2_STATUS_EXCEPTION0__MASK;
+        if (!mmu_status)
             continue;

+        if ((mmu_status - 1) < ARRAY_SIZE(fault_reasons))
+            reason = fault_reasons[mmu_status - 1];
+
         if (gpu->sec_mode == ETNA_SEC_NONE)
             address_reg = VIVS_MMUv2_EXCEPTION_ADDR(i);
         else
             address_reg = VIVS_MMUv2_SEC_EXCEPTION_ADDR;

-        dev_err_ratelimited(gpu->dev, "MMU %d fault addr 0x%08x\n", i,
-                    gpu_read(gpu, address_reg));
+        dev_err_ratelimited(gpu->dev,
+                    "MMU %d fault (%s) addr 0x%08x\n",
+                    i, reason, gpu_read(gpu, address_reg));
     }
 }
@@ -1461,6 +1569,7 @@ static irqreturn_t irq_handler(int irq, void *data)
     u32 intr = gpu_read(gpu, VIVS_HI_INTR_ACKNOWLEDGE);

     if (intr != 0) {
+        ktime_t now = ktime_get();
         int event;

         pm_runtime_mark_last_busy(gpu->dev);
@@ -1474,6 +1583,8 @@ static irqreturn_t irq_handler(int irq, void *data)

         if (intr & VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION) {
             dump_mmu_fault(gpu);
+            gpu->state = ETNA_GPU_STATE_FAULT;
+            drm_sched_fault(&gpu->sched);
             intr &= ~VIVS_HI_INTR_ACKNOWLEDGE_MMU_EXCEPTION;
         }

@@ -1508,7 +1619,7 @@ static irqreturn_t irq_handler(int irq, void *data)
              */
             if (fence_after(fence->seqno, gpu->completed_fence))
                 gpu->completed_fence = fence->seqno;
-            dma_fence_signal(fence);
+            dma_fence_signal_timestamp(fence, now);

             event_free(gpu, event);
         }
@@ -1582,9 +1693,9 @@ int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms)
     } while (1);
 }

-static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
+static void etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 {
-    if (gpu->initialized && gpu->fe_running) {
+    if (gpu->state == ETNA_GPU_STATE_RUNNING) {
         /* Replace the last WAIT with END */
         mutex_lock(&gpu->lock);
         etnaviv_buffer_end(gpu);
@@ -1597,15 +1708,12 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
          */
         etnaviv_gpu_wait_idle(gpu, 100);

-        gpu->fe_running = false;
+        gpu->state = ETNA_GPU_STATE_INITIALIZED;
     }

     gpu->exec_state = -1;
-
-    return etnaviv_gpu_clk_disable(gpu);
 }

-#ifdef CONFIG_PM
 static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
 {
     int ret;
@@ -1621,7 +1729,6 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)

     return 0;
 }
-#endif

 static int
 etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
@@ -1689,18 +1796,15 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
     if (ret)
         goto out_workqueue;

-#ifdef CONFIG_PM
-    ret = pm_runtime_get_sync(gpu->dev);
-#else
-    ret = etnaviv_gpu_clk_enable(gpu);
-#endif
-    if (ret < 0)
-        goto out_sched;
-
+    if (!IS_ENABLED(CONFIG_PM)) {
+        ret = etnaviv_gpu_clk_enable(gpu);
+        if (ret < 0)
+            goto out_sched;
+    }

     gpu->drm = drm;
     gpu->fence_context = dma_fence_context_alloc(1);
-    idr_init(&gpu->fence_idr);
+    xa_init_flags(&gpu->user_fences, XA_FLAGS_ALLOC);
     spin_lock_init(&gpu->fence_spinlock);

     INIT_WORK(&gpu->sync_point_work, sync_point_worker);
@@ -1708,9 +1812,6 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,

     priv->gpu[priv->num_gpus++] = gpu;

-    pm_runtime_mark_last_busy(gpu->dev);
-    pm_runtime_put_autosuspend(gpu->dev);
-
     return 0;

 out_sched:
@@ -1737,24 +1838,22 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,

     etnaviv_sched_fini(gpu);

-#ifdef CONFIG_PM
-    pm_runtime_get_sync(gpu->dev);
-    pm_runtime_put_sync_suspend(gpu->dev);
-#else
-    etnaviv_gpu_hw_suspend(gpu);
-#endif
+    if (IS_ENABLED(CONFIG_PM)) {
+        pm_runtime_get_sync(gpu->dev);
+        pm_runtime_put_sync_suspend(gpu->dev);
+    } else {
+        etnaviv_gpu_hw_suspend(gpu);
+        etnaviv_gpu_clk_disable(gpu);
+    }

     if (gpu->mmu_context)
         etnaviv_iommu_context_put(gpu->mmu_context);

-    if (gpu->initialized) {
-        etnaviv_cmdbuf_free(&gpu->buffer);
-        etnaviv_iommu_global_fini(gpu);
-        gpu->initialized = false;
-    }
+    etnaviv_cmdbuf_free(&gpu->buffer);
+    etnaviv_iommu_global_fini(gpu);

     gpu->drm = NULL;
-    idr_destroy(&gpu->fence_idr);
+    xa_destroy(&gpu->user_fences);

     if (IS_ENABLED(CONFIG_DRM_ETNAVIV_THERMAL))
         thermal_cooling_device_unregister(gpu->cooling);
@@ -1784,22 +1883,33 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
     if (!gpu)
         return -ENOMEM;

-    gpu->dev = &pdev->dev;
+    gpu->dev = dev;
     mutex_init(&gpu->lock);
-    mutex_init(&gpu->fence_lock);
+    mutex_init(&gpu->sched_lock);

     /* Map registers: */
     gpu->mmio = devm_platform_ioremap_resource(pdev, 0);
     if (IS_ERR(gpu->mmio))
         return PTR_ERR(gpu->mmio);

+    /* Get Reset: */
+    gpu->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+    if (IS_ERR(gpu->rst))
+        return dev_err_probe(dev, PTR_ERR(gpu->rst),
+                     "failed to get reset\n");
+
+    err = reset_control_assert(gpu->rst);
+    if (err)
+        return dev_err_probe(dev, err, "failed to assert reset\n");
+
     /* Get Interrupt: */
     gpu->irq = platform_get_irq(pdev, 0);
     if (gpu->irq < 0)
         return gpu->irq;

-    err = devm_request_irq(&pdev->dev, gpu->irq, irq_handler, 0,
-                   dev_name(gpu->dev), gpu);
+    err = devm_request_irq(dev, gpu->irq, irq_handler, 0,
+                   dev_name(dev), gpu);
     if (err) {
         dev_err(dev, "failed to request IRQ%u: %d\n", gpu->irq, err);
         return err;
     }
@@ -1836,34 +1946,37 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
      * autosuspend delay is rather arbitary: no measurements have
      * yet been performed to determine an appropriate value.
      */
-    pm_runtime_use_autosuspend(gpu->dev);
-    pm_runtime_set_autosuspend_delay(gpu->dev, 200);
-    pm_runtime_enable(gpu->dev);
+    pm_runtime_use_autosuspend(dev);
+    pm_runtime_set_autosuspend_delay(dev, 200);
+    pm_runtime_enable(dev);

-    err = component_add(&pdev->dev, &gpu_ops);
+    err = component_add(dev, &gpu_ops);
     if (err < 0) {
-        dev_err(&pdev->dev, "failed to register component: %d\n", err);
+        dev_err(dev, "failed to register component: %d\n", err);
         return err;
     }

     return 0;
 }

-static int etnaviv_gpu_platform_remove(struct platform_device *pdev)
+static void etnaviv_gpu_platform_remove(struct platform_device *pdev)
 {
+    struct etnaviv_gpu *gpu = dev_get_drvdata(&pdev->dev);
+
     component_del(&pdev->dev, &gpu_ops);
     pm_runtime_disable(&pdev->dev);
-    return 0;
+
+    mutex_destroy(&gpu->lock);
+    mutex_destroy(&gpu->sched_lock);
 }

-#ifdef CONFIG_PM
 static int etnaviv_gpu_rpm_suspend(struct device *dev)
 {
     struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
     u32 idle, mask;

     /* If there are any jobs in the HW queue, we're not idle */
-    if (atomic_read(&gpu->sched.hw_rq_count))
+    if (atomic_read(&gpu->sched.credit_count))
         return -EBUSY;

     /* Check whether the hardware (except FE and MC) is idle */
@@ -1876,7 +1989,11 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev)
         return -EBUSY;
     }

-    return etnaviv_gpu_hw_suspend(gpu);
+    etnaviv_gpu_hw_suspend(gpu);
+
+    gpu->state = ETNA_GPU_STATE_IDENTIFIED;
+
+    return etnaviv_gpu_clk_disable(gpu);
 }

 static int etnaviv_gpu_rpm_resume(struct device *dev)
@@ -1889,7 +2006,7 @@ static int etnaviv_gpu_rpm_resume(struct device *dev)
         return ret;

     /* Re-initialise the basic hardware state */
-    if (gpu->drm && gpu->initialized) {
+    if (gpu->state == ETNA_GPU_STATE_IDENTIFIED) {
         ret = etnaviv_gpu_hw_resume(gpu);
         if (ret) {
             etnaviv_gpu_clk_disable(gpu);
@@ -1899,18 +2016,15 @@ static int etnaviv_gpu_rpm_resume(struct device *dev)

     return 0;
 }
-#endif

 static const struct dev_pm_ops etnaviv_gpu_pm_ops = {
-    SET_RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume,
-               NULL)
+    RUNTIME_PM_OPS(etnaviv_gpu_rpm_suspend, etnaviv_gpu_rpm_resume, NULL)
 };

 struct platform_driver etnaviv_gpu_driver = {
     .driver = {
         .name = "etnaviv-gpu",
-        .owner = THIS_MODULE,
-        .pm = &etnaviv_gpu_pm_ops,
+        .pm = pm_ptr(&etnaviv_gpu_pm_ops),
         .of_match_table = etnaviv_gpu_match,
     },
     .probe = etnaviv_gpu_platform_probe,

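Note on the fe_waitcycles hunk: the shift by (15 - freq_scale) divides the base core rate by 32768 at full speed, which is how the ~30 us (1/32768 s) FE poll latency target in the comment falls out. A minimal standalone sketch of that arithmetic, assuming an example 800 MHz base clock (the clock value and the clamp_ul helper are illustrative, not part of the patch):

#include <stdio.h>

/* open-coded stand-in for the kernel's clamp() macro */
static unsigned long clamp_ul(unsigned long v, unsigned long lo, unsigned long hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
    unsigned long base_rate_core = 800000000UL; /* assumed 800 MHz example */
    unsigned int freq_scale = 0;                /* full speed */

    /* rate >> 15 == rate / 32768: cycles elapsed in ~30.5 us */
    unsigned long fe_waitcycles =
        clamp_ul(base_rate_core >> (15 - freq_scale), 200UL, 0xffffUL);

    printf("fe_waitcycles = %lu\n", fe_waitcycles); /* 24414 -> ~30.5 us */
    return 0;
}

With an unknown base rate (0), the shift yields 0 and the clamp's lower bound supplies the 200-cycle fallback named in the comment; very fast clocks saturate at the 0xffff register limit.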
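The ETNA_GPU_STATE_* assignments scattered through the diff replace the old gpu->initialized and gpu->fe_running booleans with a single lifecycle state. The enum itself lives in etnaviv_gpu.h, which is outside this diffstat; the sketch below is reconstructed only from the transitions visible above and may not match the authoritative definition exactly:

/* Hypothetical reconstruction; see etnaviv_gpu.h for the real enum. */
enum etnaviv_gpu_state {
    ETNA_GPU_STATE_UNKNOWN = 0,  /* assumed initial value after kzalloc */
    ETNA_GPU_STATE_IDENTIFIED,   /* etnaviv_gpu_init(): after etnaviv_hw_identify() */
    ETNA_GPU_STATE_RESET,        /* etnaviv_hw_reset(): core reset, not yet programmed */
    ETNA_GPU_STATE_INITIALIZED,  /* etnaviv_gpu_hw_init(): registers set up, FE idle */
    ETNA_GPU_STATE_RUNNING,      /* etnaviv_gpu_start_fe_idleloop(): FE executing */
    ETNA_GPU_STATE_FAULT,        /* irq_handler(): MMU exception observed */
};

The WARN_ON checks added in etnaviv_gpu_hw_init() and etnaviv_gpu_start_fe_idleloop() enforce that these transitions only happen in the expected order, and the runtime-PM suspend path walks the state back from RUNNING to INITIALIZED to IDENTIFIED.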
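The user-fence conversion swaps idr_find() for xa_load() while keeping the same RCU discipline: xa_load() may be called under rcu_read_lock(), and dma_fence_get_rcu() returns NULL if the fence's refcount has already hit zero, which the surrounding code treats as "fence not found". A minimal sketch of that lookup-side pattern (the function name is illustrative; the xarray is populated by the submit path outside this file):

#include <linux/dma-fence.h>
#include <linux/xarray.h>

static struct dma_fence *user_fence_lookup(struct xarray *user_fences, u32 id)
{
    struct dma_fence *fence;

    rcu_read_lock();
    /* xa_load() is RCU-safe; no xa_lock needed for a pure lookup */
    fence = xa_load(user_fences, id);
    if (fence)
        /* returns NULL if the fence is already being released */
        fence = dma_fence_get_rcu(fence);
    rcu_read_unlock();

    return fence;
}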