Diffstat (limited to 'drivers/gpu')
92 files changed, 1349 insertions, 713 deletions
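Editorial note: the recurring change in this series is the new test_ib(ring, timeout) callback signature. Every per-ring IB test is converted from an unbounded fence_wait() to fence_wait_timeout(), and amdgpu_ib_ring_tests() passes AMDGPU_IB_TEST_TIMEOUT (one second). A minimal sketch of the return-value handling that the converted tests share, distilled from the hunks below (the function name and the omitted IB submission step are illustrative, not part of the patch):

static int example_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
    struct fence *f = NULL;
    long r;

    /* submit a trivial IB on the ring and obtain its fence here (omitted) */

    /*
     * fence_wait_timeout() returns 0 on timeout, a negative error code on
     * failure, and the remaining jiffies (> 0) on success, so the result
     * has to be folded back into a plain 0/-errno for the caller.
     */
    r = fence_wait_timeout(f, false, timeout);
    if (r == 0) {
        DRM_ERROR("amdgpu: IB test timed out\n");
        r = -ETIMEDOUT;
    } else if (r < 0) {
        DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
    } else {
        DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
        r = 0;
    }

    fence_put(f);
    return r;
}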
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index eb09037a7161..8ebc5f1eb4c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -306,13 +306,16 @@ struct amdgpu_ring_funcs { uint32_t oa_base, uint32_t oa_size); /* testing functions */ int (*test_ring)(struct amdgpu_ring *ring); - int (*test_ib)(struct amdgpu_ring *ring); + int (*test_ib)(struct amdgpu_ring *ring, long timeout); /* insert NOP packets */ void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); /* pad the indirect buffer to the necessary number of dw */ void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib); unsigned (*init_cond_exec)(struct amdgpu_ring *ring); void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset); + /* note usage for clock and power gating */ + void (*begin_use)(struct amdgpu_ring *ring); + void (*end_use)(struct amdgpu_ring *ring); }; /* @@ -772,7 +775,6 @@ struct amdgpu_ring { struct amdgpu_fence_driver fence_drv; struct amd_gpu_scheduler sched; - spinlock_t fence_lock; struct amdgpu_bo *ring_obj; volatile uint32_t *ring; unsigned rptr_offs; @@ -799,8 +801,8 @@ struct amdgpu_ring { enum amdgpu_ring_type type; char name[16]; unsigned cond_exe_offs; - u64 cond_exe_gpu_addr; - volatile u32 *cond_exe_cpu_addr; + u64 cond_exe_gpu_addr; + volatile u32 *cond_exe_cpu_addr; #if defined(CONFIG_DEBUG_FS) struct dentry *ent; #endif @@ -1679,6 +1681,7 @@ struct amdgpu_uvd { struct amdgpu_ring ring; struct amdgpu_irq_src irq; bool address_64_bit; + bool use_ctx_buf; struct amd_sched_entity entity; }; @@ -1700,6 +1703,7 @@ struct amdgpu_vce { struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES]; uint32_t img_size[AMDGPU_MAX_VCE_HANDLES]; struct delayed_work idle_work; + struct mutex idle_mutex; const struct firmware *fw; /* VCE firmware */ struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; struct amdgpu_irq_src irq; @@ -2242,7 +2246,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring) #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) -#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r)) +#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t)) #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c index 252edba16e36..892d60fb225b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c @@ -421,29 +421,6 @@ static int acp_suspend(void *handle) static int acp_resume(void *handle) { - int i, ret; - struct acp_pm_domain *apd; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - /* return early if no ACP */ - if (!adev->acp.acp_genpd) - return 0; - - /* SMU block will power on ACP irrespective of ACP runtime status. - * Power off explicitly based on genpd ACP runtime status so that ACP - * hw and ACP-genpd status are in sync. 
- * 'suspend_power_off' represents "Power status before system suspend" - */ - if (adev->acp.acp_genpd->gpd.suspend_power_off == true) { - apd = container_of(&adev->acp.acp_genpd->gpd, - struct acp_pm_domain, gpd); - - for (i = 4; i >= 0 ; i--) { - ret = acp_suspend_tile(apd->cgs_dev, ACP_TILE_P1 + i); - if (ret) - pr_err("ACP tile %d tile suspend failed\n", i); - } - } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 9df1bcb35bf0..983175363b06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -551,28 +551,19 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) le16_to_cpu(firmware_info->info.usReferenceClock); ppll->reference_div = 0; - if (crev < 2) - ppll->pll_out_min = - le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); - else - ppll->pll_out_min = - le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); + ppll->pll_out_min = + le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); ppll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); - if (crev >= 4) { - ppll->lcd_pll_out_min = - le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; - if (ppll->lcd_pll_out_min == 0) - ppll->lcd_pll_out_min = ppll->pll_out_min; - ppll->lcd_pll_out_max = - le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; - if (ppll->lcd_pll_out_max == 0) - ppll->lcd_pll_out_max = ppll->pll_out_max; - } else { + ppll->lcd_pll_out_min = + le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; + if (ppll->lcd_pll_out_min == 0) ppll->lcd_pll_out_min = ppll->pll_out_min; + ppll->lcd_pll_out_max = + le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; + if (ppll->lcd_pll_out_max == 0) ppll->lcd_pll_out_max = ppll->pll_out_max; - } if (ppll->pll_out_min == 0) ppll->pll_out_min = 64800; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 0494fe7b62c0..49de92600074 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -539,7 +539,6 @@ static int amdgpu_atpx_get_client_id(struct pci_dev *pdev) static const struct vga_switcheroo_handler amdgpu_atpx_handler = { .switchto = amdgpu_atpx_switchto, .power_state = amdgpu_atpx_power_state, - .init = amdgpu_atpx_init, .get_client_id = amdgpu_atpx_get_client_id, }; @@ -574,6 +573,7 @@ static bool amdgpu_atpx_detect(void) printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n", acpi_method_name); amdgpu_atpx_priv.atpx_detected = true; + amdgpu_atpx_init(); return true; } return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 99ca75baa47d..2b6afe123f3d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -352,22 +352,22 @@ bool amdgpu_get_bios(struct amdgpu_device *adev) uint16_t tmp, bios_header_start; r = amdgpu_atrm_get_bios(adev); - if (r == false) + if (!r) r = amdgpu_acpi_vfct_bios(adev); - if (r == false) + if (!r) r = igp_read_bios_from_vram(adev); - if (r == false) + if (!r) r = amdgpu_read_bios(adev); - if (r == false) { + if (!r) { r = amdgpu_read_bios_from_rom(adev); } - if (r == false) { + if (!r) { r = amdgpu_read_disabled_bios(adev); } - if (r == false) { + if (!r) { r = amdgpu_read_platform_bios(adev); } - if (r == false || adev->bios == NULL) { 
+ if (!r || adev->bios == NULL) { DRM_ERROR("Unable to locate a BIOS ROM\n"); adev->bios = NULL; return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index ee95e950a19b..bc0440f7a31d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -810,7 +810,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, } static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, - struct cgs_system_info *sys_info) + struct cgs_system_info *sys_info) { CGS_FUNC_ADEV; @@ -830,6 +830,12 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, case CGS_SYSTEM_INFO_PCIE_MLW: sys_info->value = adev->pm.pcie_mlw_mask; break; + case CGS_SYSTEM_INFO_PCIE_DEV: + sys_info->value = adev->pdev->device; + break; + case CGS_SYSTEM_INFO_PCIE_REV: + sys_info->value = adev->pdev->revision; + break; case CGS_SYSTEM_INFO_CG_FLAGS: sys_info->value = adev->cg_flags; break; @@ -915,14 +921,12 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, acpi_handle handle; struct acpi_object_list input; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *params = NULL; - union acpi_object *obj = NULL; + union acpi_object *params, *obj; uint8_t name[5] = {'\0'}; - struct cgs_acpi_method_argument *argument = NULL; + struct cgs_acpi_method_argument *argument; uint32_t i, count; acpi_status status; - int result = 0; - uint32_t func_no = 0xFFFFFFFF; + int result; handle = ACPI_HANDLE(&adev->pdev->dev); if (!handle) @@ -939,7 +943,6 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, if (info->pinput_argument == NULL) return -EINVAL; argument = info->pinput_argument; - func_no = argument->value; for (i = 0; i < info->input_count; i++) { if (((argument->type == ACPI_TYPE_STRING) || (argument->type == ACPI_TYPE_BUFFER)) && @@ -1008,7 +1011,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, if (ACPI_FAILURE(status)) { result = -EIO; - goto error; + goto free_input; } /* return the output info */ @@ -1018,7 +1021,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, if ((obj->type != ACPI_TYPE_PACKAGE) || (obj->package.count != count)) { result = -EIO; - goto error; + goto free_obj; } params = obj->package.elements; } else @@ -1026,13 +1029,13 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, if (params == NULL) { result = -EIO; - goto error; + goto free_obj; } for (i = 0; i < count; i++) { if (argument->type != params->type) { result = -EIO; - goto error; + goto free_obj; } switch (params->type) { case ACPI_TYPE_INTEGER: @@ -1042,7 +1045,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, if ((params->string.length != argument->data_length) || (params->string.pointer == NULL)) { result = -EIO; - goto error; + goto free_obj; } strncpy(argument->pointer, params->string.pointer, @@ -1051,7 +1054,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, case ACPI_TYPE_BUFFER: if (params->buffer.pointer == NULL) { result = -EIO; - goto error; + goto free_obj; } memcpy(argument->pointer, params->buffer.pointer, @@ -1064,9 +1067,10 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device, params++; } -error: - if (obj != NULL) - kfree(obj); + result = 0; +free_obj: + kfree(obj); +free_input: kfree((void *)input.pointer); return result; } @@ -1078,7 +1082,7 @@ static int amdgpu_cgs_acpi_eval_object(struct 
cgs_device *cgs_device, } #endif -int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device, +static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device, uint32_t acpi_method, uint32_t acpi_function, void *pinput, void *poutput, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 614fb026436d..df7ab2458e50 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1911,7 +1911,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) } drm_kms_helper_poll_enable(dev); + + /* + * Most of the connector probing functions try to acquire runtime pm + * refs to ensure that the GPU is powered on when connector polling is + * performed. Since we're calling this from a runtime PM callback, + * trying to acquire rpm refs will cause us to deadlock. + * + * Since we're guaranteed to be holding the rpm lock, it's safe to + * temporarily disable the rpm helpers so this doesn't deadlock us. + */ +#ifdef CONFIG_PM + dev->dev->power.disable_depth++; +#endif drm_helper_hpd_irq_event(dev); +#ifdef CONFIG_PM + dev->dev->power.disable_depth--; +#endif if (fbcon) { amdgpu_fbdev_set_suspend(adev, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 7dbe8d02c5a6..76f96028313d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -122,7 +122,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work) spin_unlock_irqrestore(&crtc->dev->event_lock, flags); usleep_range(min_udelay, 2 * min_udelay); spin_lock_irqsave(&crtc->dev->event_lock, flags); - }; + } if (!repcnt) DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, " @@ -516,9 +516,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); - if (amdgpu_fb->obj) { - drm_gem_object_unreference_unlocked(amdgpu_fb->obj); - } + drm_gem_object_unreference_unlocked(amdgpu_fb->obj); drm_framebuffer_cleanup(fb); kfree(amdgpu_fb); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 015f1f4aae53..9aa533cf4ad1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -52,9 +52,10 @@ * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP) * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same * at the end of IBs. + * - 3.3.0 - Add VM support for UVD on supported hardware. 
*/ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 2 +#define KMS_DRIVER_MINOR 3 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 428ebf3a4387..a31d7ef3032c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -33,6 +33,8 @@ #include "amdgpu.h" #include "atom.h" +#define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000) + /* * IB * IBs (Indirect Buffers) and areas of GPU accessible memory where @@ -286,7 +288,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev) if (!ring || !ring->ready) continue; - r = amdgpu_ring_test_ib(ring); + r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT); if (r) { ring->ready = false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index a8efbb54423f..d942654a1de0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -584,6 +584,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); + amdgpu_uvd_free_handles(adev, file_priv); + amdgpu_vce_free_handles(adev, file_priv); + amdgpu_vm_fini(adev, &fpriv->vm); idr_for_each_entry(&fpriv->bo_list_handles, list, handle) @@ -608,10 +611,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev, void amdgpu_driver_preclose_kms(struct drm_device *dev, struct drm_file *file_priv) { - struct amdgpu_device *adev = dev->dev_private; - - amdgpu_uvd_free_handles(adev, file_priv); - amdgpu_vce_free_handles(adev, file_priv); } /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 3b885e3e9b56..85aeb0a804bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -75,6 +75,10 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) ring->count_dw = ndw; ring->wptr_old = ring->wptr; + + if (ring->funcs->begin_use) + ring->funcs->begin_use(ring); + return 0; } @@ -127,6 +131,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) mb(); amdgpu_ring_set_wptr(ring); + + if (ring->funcs->end_use) + ring->funcs->end_use(ring); } /** @@ -139,6 +146,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring) void amdgpu_ring_undo(struct amdgpu_ring *ring) { ring->wptr = ring->wptr_old; + + if (ring->funcs->end_use) + ring->funcs->end_use(ring); } /** @@ -198,7 +208,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4); ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs]; - spin_lock_init(&ring->fence_lock); r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type); if (r) { dev_err(adev->dev, "failed initializing fences (%d).\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index a46a64c125d1..b11f4e8868d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -41,8 +41,15 @@ /* 1 second timeout */ #define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000) + +/* Firmware versions for VI */ +#define FW_1_65_10 ((1 << 24) | (65 << 16) | (10 << 8)) +#define FW_1_87_11 ((1 << 24) | (87 << 16) | (11 << 8)) +#define FW_1_87_12 ((1 << 24) | (87 << 16) | (12 << 8)) +#define FW_1_37_15 ((1 << 24) | (37 << 16) | (15 << 8)) + /* Polaris10/11 firmware version */ -#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8)) +#define FW_1_66_16 ((1 << 24) | (66 << 
16) | (16 << 8)) /* Firmware Names */ #ifdef CONFIG_DRM_AMDGPU_CIK @@ -92,7 +99,6 @@ MODULE_FIRMWARE(FIRMWARE_STONEY); MODULE_FIRMWARE(FIRMWARE_POLARIS10); MODULE_FIRMWARE(FIRMWARE_POLARIS11); -static void amdgpu_uvd_note_usage(struct amdgpu_device *adev); static void amdgpu_uvd_idle_work_handler(struct work_struct *work); int amdgpu_uvd_sw_init(struct amdgpu_device *adev) @@ -246,6 +252,23 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev) if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) adev->uvd.address_64_bit = true; + switch (adev->asic_type) { + case CHIP_TONGA: + adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10; + break; + case CHIP_CARRIZO: + adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11; + break; + case CHIP_FIJI: + adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12; + break; + case CHIP_STONEY: + adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15; + break; + default: + adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10; + } + return 0; } @@ -346,8 +369,6 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp) if (handle != 0 && adev->uvd.filp[i] == filp) { struct fence *fence; - amdgpu_uvd_note_usage(adev); - r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence); if (r) { @@ -438,7 +459,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, unsigned fs_in_mb = width_in_mb * height_in_mb; unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; - unsigned min_ctx_size = 0; + unsigned min_ctx_size = ~0; image_size = width * height; image_size += image_size / 2; @@ -557,7 +578,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg, /* reference picture buffer */ min_dpb_size = image_size * num_dpb_buffer; - if (adev->asic_type < CHIP_POLARIS10){ + if (!adev->uvd.use_ctx_buf){ /* macroblock context buffer */ min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192; @@ -913,8 +934,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx) return -EINVAL; } - amdgpu_uvd_note_usage(ctx.parser->adev); - return 0; } @@ -1106,10 +1125,6 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) if (fences == 0 && handles == 0) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_uvd(adev, false); - /* just work around for uvd clock remain high even - * when uvd dpm disabled on Polaris10 */ - if (adev->asic_type == CHIP_POLARIS10) - amdgpu_asic_set_uvd_clocks(adev, 0, 0); } else { amdgpu_asic_set_uvd_clocks(adev, 0, 0); } @@ -1118,11 +1133,10 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work) } } -static void amdgpu_uvd_note_usage(struct amdgpu_device *adev) +void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring) { + struct amdgpu_device *adev = ring->adev; bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); - set_clocks &= schedule_delayed_work(&adev->uvd.idle_work, - UVD_IDLE_TIMEOUT); if (set_clocks) { if (adev->pm.dpm_enabled) { @@ -1132,3 +1146,48 @@ static void amdgpu_uvd_note_usage(struct amdgpu_device *adev) } } } + +void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) +{ + schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); +} + +/** + * amdgpu_uvd_ring_test_ib - test ib execution + * + * @ring: amdgpu_ring pointer + * + * Test if we can successfully execute an IB + */ +int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout) +{ + struct fence *fence; + long r; + + r = amdgpu_uvd_get_create_msg(ring, 1, NULL); + if (r) { + 
DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); + goto error; + } + + r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); + if (r) { + DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); + goto error; + } + + r = fence_wait_timeout(fence, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out.\n"); + r = -ETIMEDOUT; + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); + } else { + DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; + } + +error: + fence_put(fence); + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h index 9a3b449081a7..c850009602d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h @@ -35,5 +35,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); +void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring); +void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring); +int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index aeeeb72ebbc4..05865ce35351 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -85,8 +85,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) unsigned ucode_version, version_major, version_minor, binary_id; int i, r; - INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler); - switch (adev->asic_type) { #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_BONAIRE: @@ -197,6 +195,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size) adev->vce.filp[i] = NULL; } + INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler); + mutex_init(&adev->vce.idle_mutex); + return 0; } @@ -220,6 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) amdgpu_ring_fini(&adev->vce.ring[1]); release_firmware(adev->vce.fw); + mutex_destroy(&adev->vce.idle_mutex); return 0; } @@ -315,19 +317,19 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work) } /** - * amdgpu_vce_note_usage - power up VCE + * amdgpu_vce_ring_begin_use - power up VCE * - * @adev: amdgpu_device pointer + * @ring: amdgpu ring * * Make sure VCE is powerd up when we want to use it */ -static void amdgpu_vce_note_usage(struct amdgpu_device *adev) +void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring) { - bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work); - - set_clocks &= schedule_delayed_work(&adev->vce.idle_work, - VCE_IDLE_TIMEOUT); + struct amdgpu_device *adev = ring->adev; + bool set_clocks; + mutex_lock(&adev->vce.idle_mutex); + set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work); if (set_clocks) { if (adev->pm.dpm_enabled) { amdgpu_dpm_enable_vce(adev, true); @@ -335,6 +337,19 @@ static void amdgpu_vce_note_usage(struct amdgpu_device *adev) amdgpu_asic_set_vce_clocks(adev, 53300, 40000); } } + mutex_unlock(&adev->vce.idle_mutex); +} + +/** + * amdgpu_vce_ring_end_use - power VCE down + * + * @ring: amdgpu ring + * + * Schedule work to power VCE down again + */ +void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring) +{ + schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT); } /** @@ -355,8 +370,6 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct 
drm_file *filp) if (!handle || adev->vce.filp[i] != filp) continue; - amdgpu_vce_note_usage(adev); - r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL); if (r) DRM_ERROR("Error destroying VCE handle (%d)!\n", r); @@ -464,7 +477,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, struct amdgpu_job *job; struct amdgpu_ib *ib; struct fence *f = NULL; - uint64_t dummy; int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); @@ -472,7 +484,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, return r; ib = &job->ibs[0]; - dummy = ib->gpu_addr + 1024; /* stitch together an VCE destroy msg */ ib->length_dw = 0; @@ -480,11 +491,14 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ ib->ptr[ib->length_dw++] = handle; - ib->ptr[ib->length_dw++] = 0x00000014; /* len */ - ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ - ib->ptr[ib->length_dw++] = upper_32_bits(dummy); - ib->ptr[ib->length_dw++] = dummy; - ib->ptr[ib->length_dw++] = 0x00000001; + ib->ptr[ib->length_dw++] = 0x00000020; /* len */ + ib->ptr[ib->length_dw++] = 0x00000002; /* task info */ + ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if no */ + ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */ + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0x00000000; + ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */ + ib->ptr[ib->length_dw++] = 0x00000000; ib->ptr[ib->length_dw++] = 0x00000008; /* len */ ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */ @@ -622,8 +636,6 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx) uint32_t *size = &tmp; int i, r = 0, idx = 0; - amdgpu_vce_note_usage(p->adev); - while (idx < ib->length_dw) { uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx); uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1); @@ -833,10 +845,10 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring) * @ring: the engine to test on * */ -int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) +int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct fence *fence = NULL; - int r; + long r; /* skip vce ring1 ib test for now, since it's not reliable */ if (ring == &ring->adev->vce.ring[1]) @@ -844,21 +856,25 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) r = amdgpu_vce_get_create_msg(ring, 1, NULL); if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); + DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r); goto error; } r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence); if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r); goto error; } - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(fence, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out.\n"); + r = -ETIMEDOUT; + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); } else { DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } error: fence_put(fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index f40cf761c66f..63f83d0d985c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -39,6 +39,8 @@ void 
amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags); int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring); -int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring); +int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout); +void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring); +void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index 573bc973d692..e2f0e5d58d5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -102,12 +102,14 @@ static const struct ci_pt_defaults defaults_saturn_xt = { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 } }; +#if 0 static const struct ci_pt_defaults defaults_saturn_pro = { 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000, { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A }, { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 } }; +#endif static const struct ci_pt_config_reg didt_config_ci[] = { @@ -3034,7 +3036,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev, if (pi->mclk_stutter_mode_threshold && (memory_clock <= pi->mclk_stutter_mode_threshold) && - (pi->uvd_enabled == false) && + (!pi->uvd_enabled) && (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) && (adev->pm.dpm.new_active_crtc_count <= 2)) memory_level->StutterEnable = true; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index a7de4d18ac94..4efc901f658c 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -879,7 +879,7 @@ static void cik_vga_set_state(struct amdgpu_device *adev, bool state) uint32_t tmp; tmp = RREG32(mmCONFIG_CNTL); - if (state == false) + if (!state) tmp |= CONFIG_CNTL__VGA_DIS_MASK; else tmp &= ~CONFIG_CNTL__VGA_DIS_MASK; diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 6507a7ee75e4..ee6466912497 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -354,7 +354,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable) u32 me_cntl; int i; - if (enable == false) { + if (!enable) { cik_sdma_gfx_stop(adev); cik_sdma_rlc_stop(adev); } @@ -617,19 +617,19 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring) * Test a simple IB in the DMA ring (CIK). * Returns 0 on success, error on failure. 
*/ -static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) +static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; unsigned index; - int r; u32 tmp = 0; u64 gpu_addr; + long r; r = amdgpu_wb_get(adev, &index); if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; } @@ -639,7 +639,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err0; } @@ -654,14 +654,19 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) if (r) goto err1; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; + goto err1; + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; } tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) { DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 8ba07e79d4cb..2a11413ed54a 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -425,7 +425,7 @@ static int cz_dpm_init(struct amdgpu_device *adev) pi->mgcg_cgtt_local1 = 0x0; pi->clock_slow_down_step = 25000; pi->skip_clock_slow_down = 1; - pi->enable_nb_ps_policy = 0; + pi->enable_nb_ps_policy = false; pi->caps_power_containment = true; pi->caps_cac = true; pi->didt_enabled = false; diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c index b336c918d6a7..b3e19ba4c57f 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c @@ -173,7 +173,7 @@ static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) { if (!fiji_is_smc_ram_running(adev)) { - return -EINVAL;; + return -EINVAL; } if (wait_smu_response(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 1ac5ad01bf58..d869d058ef24 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2105,25 +2105,25 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, * Provides a basic gfx ring test to verify that IBs are working. * Returns 0 on success, error on failure. 
*/ -static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) +static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; - int r; + long r; r = amdgpu_gfx_scratch_get(adev, &scratch); if (r) { - DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r); + DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); return r; } WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err1; } ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); @@ -2135,14 +2135,19 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) if (r) goto err2; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; + goto err2; + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err2; } tmp = RREG32(scratch); if (tmp == 0xDEADBEEF) { DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index d97b962bc3de..bff8668e9e6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -787,25 +787,25 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) return r; } -static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) +static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; uint32_t scratch; uint32_t tmp = 0; - int r; + long r; r = amdgpu_gfx_scratch_get(adev, &scratch); if (r) { - DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r); + DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r); return r; } WREG32(scratch, 0xCAFEDEAD); memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err1; } ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); @@ -817,14 +817,19 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) if (r) goto err2; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out.\n"); + r = -ETIMEDOUT; + goto err2; + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err2; } tmp = RREG32(scratch); if (tmp == 0xDEADBEEF) { DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp); diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c index 528571285e69..211839913728 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c @@ -211,7 +211,7 @@ static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev, PPSMC_Msg msg) { if (!iceland_is_smc_ram_running(adev)) - return -EINVAL;; + return -EINVAL; if (wait_smu_response(adev)) { DRM_ERROR("Failed to send previous message\n"); diff --git 
a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h index 1e0769e110fa..5983e3150cc5 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_smum.h +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h @@ -21,8 +21,8 @@ * */ -#ifndef ICELAND_SMUMGR_H -#define ICELAND_SMUMGR_H +#ifndef ICELAND_SMUM_H +#define ICELAND_SMUM_H #include "ppsmc.h" diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index 0111d153411b..1351c7e834a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -393,7 +393,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable) u32 f32_cntl; int i; - if (enable == false) { + if (!enable) { sdma_v2_4_gfx_stop(adev); sdma_v2_4_rlc_stop(adev); } @@ -668,19 +668,19 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring) * Test a simple IB in the DMA ring (VI). * Returns 0 on success, error on failure. */ -static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) +static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; unsigned index; - int r; u32 tmp = 0; u64 gpu_addr; + long r; r = amdgpu_wb_get(adev, &index); if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; } @@ -690,7 +690,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err0; } @@ -709,14 +709,19 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) if (r) goto err1; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; + goto err1; + } else if (r) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; } tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) { DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 2b8ce5808a14..653ce5ed55ae 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -604,7 +604,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable) u32 f32_cntl; int i; - if (enable == false) { + if (!enable) { sdma_v3_0_gfx_stop(adev); sdma_v3_0_rlc_stop(adev); } @@ -896,19 +896,19 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring) * Test a simple IB in the DMA ring (VI). * Returns 0 on success, error on failure. 
*/ -static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) +static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib ib; struct fence *f = NULL; unsigned index; - int r; u32 tmp = 0; u64 gpu_addr; + long r; r = amdgpu_wb_get(adev, &index); if (r) { - dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); + dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r); return r; } @@ -918,7 +918,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) memset(&ib, 0, sizeof(ib)); r = amdgpu_ib_get(adev, NULL, 256, &ib); if (r) { - DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); + DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r); goto err0; } @@ -937,14 +937,19 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) if (r) goto err1; - r = fence_wait(f, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); + r = fence_wait_timeout(f, false, timeout); + if (r == 0) { + DRM_ERROR("amdgpu: IB test timed out\n"); + r = -ETIMEDOUT; + goto err1; + } else if (r < 0) { + DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r); goto err1; } tmp = le32_to_cpu(adev->wb.wb[index]); if (tmp == 0xDEADBEEF) { DRM_INFO("ib test on ring %d succeeded\n", ring->idx); + r = 0; } else { DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); r = -EINVAL; diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c index 083893dd68c0..940de1836f8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c @@ -173,7 +173,7 @@ static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg) { if (!tonga_is_smc_ram_running(adev)) { - return -EINVAL;; + return -EINVAL; } if (wait_smu_response(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index 416c8567d3ed..132e613ed674 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -527,49 +527,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring, } /** - * uvd_v4_2_ring_test_ib - test ib execution - * - * @ring: amdgpu_ring pointer - * - * Test if we can successfully execute an IB - */ -static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - struct fence *fence = NULL; - int r; - - r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); - if (r) { - DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r); - return r; - } - - r = amdgpu_uvd_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); - goto error; - } - - r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); - goto error; - } - - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - goto error; - } - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); -error: - fence_put(fence); - amdgpu_asic_set_uvd_clocks(adev, 0, 0); - return r; -} - -/** * uvd_v4_2_mc_resume - memory controller programming * * @adev: amdgpu_device pointer @@ -794,9 +751,11 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush, .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate, .test_ring = uvd_v4_2_ring_test_ring, - .test_ib = uvd_v4_2_ring_test_ib, + .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use 
= amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, }; static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index dd636c4c4b08..101de136ba63 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -577,49 +577,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } -/** - * uvd_v5_0_ring_test_ib - test ib execution - * - * @ring: amdgpu_ring pointer - * - * Test if we can successfully execute an IB - */ -static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - struct fence *fence = NULL; - int r; - - r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000); - if (r) { - DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r); - return r; - } - - r = amdgpu_uvd_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); - goto error; - } - - r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); - goto error; - } - - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - goto error; - } - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); -error: - fence_put(fence); - amdgpu_asic_set_uvd_clocks(adev, 0, 0); - return r; -} - static bool uvd_v5_0_is_idle(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -845,9 +802,11 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush, .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate, .test_ring = uvd_v5_0_ring_test_ring, - .test_ib = uvd_v5_0_ring_test_ib, + .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, }; static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 07e9a987fbee..7f21102bfb99 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -34,6 +34,7 @@ #include "smu/smu_7_1_3_d.h" #include "smu/smu_7_1_3_sh_mask.h" #include "bif/bif_5_1_d.h" +#include "gmc/gmc_8_1_d.h" #include "vi.h" static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev); @@ -672,6 +673,9 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib, unsigned vm_id, bool ctx_switch) { + amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0)); + amdgpu_ring_write(ring, vm_id); + amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0)); @@ -680,39 +684,55 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, amdgpu_ring_write(ring, ib->length_dw); } -/** - * uvd_v6_0_ring_test_ib - test ib execution - * - * @ring: amdgpu_ring pointer - * - * Test if we can successfully execute an IB - */ -static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring) +static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, + unsigned vm_id, uint64_t pd_addr) { - struct fence *fence = NULL; - int r; + uint32_t reg; - r = amdgpu_uvd_get_create_msg(ring, 1, NULL); - if (r) { - DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); - 
goto error; - } + if (vm_id < 8) + reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id; + else + reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8; - r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); - if (r) { - DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); - goto error; - } + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, reg << 2); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, pd_addr >> 12); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0x8); - r = fence_wait(fence, false); - if (r) { - DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); - goto error; - } - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); -error: - fence_put(fence); - return r; + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, 1 << vm_id); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0x8); + + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0)); + amdgpu_ring_write(ring, 1 << vm_id); /* mask */ + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0xC); +} + +static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) +{ + uint32_t seq = ring->fence_drv.sync_seq; + uint64_t addr = ring->fence_drv.gpu_addr; + + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0)); + amdgpu_ring_write(ring, 0xffffffff); /* mask */ + amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0)); + amdgpu_ring_write(ring, seq); + amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0)); + amdgpu_ring_write(ring, 0xE); } static bool uvd_v6_0_is_idle(void *handle) @@ -951,7 +971,7 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = { .set_powergating_state = uvd_v6_0_set_powergating_state, }; -static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = { +static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = { .get_rptr = uvd_v6_0_ring_get_rptr, .get_wptr = uvd_v6_0_ring_get_wptr, .set_wptr = uvd_v6_0_ring_set_wptr, @@ -961,14 +981,41 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = { .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate, .test_ring = uvd_v6_0_ring_test_ring, - .test_ib = uvd_v6_0_ring_test_ib, + .test_ib = amdgpu_uvd_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, +}; + +static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = { + .get_rptr = uvd_v6_0_ring_get_rptr, + .get_wptr = uvd_v6_0_ring_get_wptr, + .set_wptr = uvd_v6_0_ring_set_wptr, + .parse_cs = NULL, + .emit_ib = uvd_v6_0_ring_emit_ib, + .emit_fence = uvd_v6_0_ring_emit_fence, + .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush, + .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync, + .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, + .emit_hdp_invalidate = 
uvd_v6_0_ring_emit_hdp_invalidate, + .test_ring = uvd_v6_0_ring_test_ring, + .test_ib = amdgpu_uvd_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_uvd_ring_begin_use, + .end_use = amdgpu_uvd_ring_end_use, }; static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) { - adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs; + if (adev->asic_type >= CHIP_POLARIS10) { + adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs; + DRM_INFO("UVD is enabled in VM mode\n"); + } else { + adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs; + DRM_INFO("UVD is enabled in physical mode\n"); + } } static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c index 45d92aceb485..80a37a602181 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c @@ -594,6 +594,8 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = { .test_ib = amdgpu_vce_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vce_ring_begin_use, + .end_use = amdgpu_vce_ring_end_use, }; static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index d7b8da433fe2..c271abffd8dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c @@ -210,11 +210,11 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev, static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev) { int i, j; - uint32_t status = 0; for (i = 0; i < 10; ++i) { for (j = 0; j < 100; ++j) { - status = RREG32(mmVCE_STATUS); + uint32_t status = RREG32(mmVCE_STATUS); + if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK) return 0; mdelay(10); @@ -655,6 +655,18 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev, return 0; } +static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable) +{ + u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL); + + if (enable) + tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; + else + tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK; + + WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp); +} + static int vce_v3_0_set_clockgating_state(void *handle, enum amd_clockgating_state state) { @@ -662,6 +674,9 @@ static int vce_v3_0_set_clockgating_state(void *handle, bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; int i; + if (adev->asic_type == CHIP_POLARIS10) + vce_v3_set_bypass_mode(adev, enable); + if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) return 0; @@ -752,6 +767,8 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = { .test_ib = amdgpu_vce_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vce_ring_begin_use, + .end_use = amdgpu_vce_ring_end_use, }; static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h index 293329719bba..809759f7bb81 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h @@ -27,6 +27,7 @@ #define mmMM_INDEX 0x0 #define mmMM_INDEX_HI 0x6 #define mmMM_DATA 0x1 +#define mmCC_BIF_BX_STRAP2 0x152A #define mmBIF_MM_INDACCESS_CNTL 0x1500 #define mmBIF_DOORBELL_APER_EN 0x1501 #define mmBUS_CNTL 0x1508 diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h index 6f6fb34742d2..ec69869c55ff 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h @@ -111,6 +111,8 @@ #define mmUVD_MIF_RECON1_ADDR_CONFIG 0x39c5 #define ixUVD_MIF_SCLR_ADDR_CONFIG 0x4 #define mmUVD_JPEG_ADDR_CONFIG 0x3a1f +#define mmUVD_GP_SCRATCH8 0x3c0a +#define mmUVD_GP_SCRATCH9 0x3c0b #define mmUVD_GP_SCRATCH4 0x3d38 #endif /* UVD_6_0_D_H */ diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index f32af2f2091a..b86aba9d019f 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -113,6 +113,8 @@ enum cgs_system_info_id { CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1, CGS_SYSTEM_INFO_PCIE_GEN_INFO, CGS_SYSTEM_INFO_PCIE_MLW, + CGS_SYSTEM_INFO_PCIE_DEV, + CGS_SYSTEM_INFO_PCIE_REV, CGS_SYSTEM_INFO_CG_FLAGS, CGS_SYSTEM_INFO_PG_FLAGS, CGS_SYSTEM_INFO_GFX_CU_INFO, @@ -121,13 +123,13 @@ enum cgs_system_info_id { }; struct cgs_system_info { - uint64_t size; - uint64_t info_id; + uint64_t size; + enum cgs_system_info_id info_id; union { - void *ptr; - uint64_t value; + void *ptr; + uint64_t value; }; - uint64_t padding[13]; + uint64_t padding[13]; }; /* diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index f9e03ad0baa2..abbb658bdc1e 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -176,7 +176,7 @@ static int pp_hw_fini(void *handle) static bool pp_is_idle(void *handle) { - return 0; + return false; } static int pp_wait_for_idle(void *handle) diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c index d6635cc4b0fc..635fc4b48184 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c @@ -30,7 +30,6 @@ static const pem_event_action * const initialize_event[] = { system_config_tasks, setup_asic_tasks, enable_dynamic_state_management_tasks, - enable_clock_power_gatings_tasks, get_2d_performance_state_tasks, set_performance_state_tasks, initialize_thermal_controller_tasks, @@ -140,7 +139,6 @@ static const pem_event_action * const resume_event[] = { setup_asic_tasks, enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */ 
enable_dynamic_state_management_tasks, - enable_clock_power_gatings_tasks, enable_disable_bapm_tasks, initialize_thermal_controller_tasks, get_2d_performance_state_tasks, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index 2da548f6337e..2028980f1ed4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -177,12 +177,12 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) cz_dpm_powerdown_uvd(hwmgr); } else { cz_dpm_powerup_uvd(hwmgr); - cgs_set_clockgating_state(hwmgr->device, - AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); cgs_set_powergating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, AMD_CG_STATE_UNGATE); + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); cz_dpm_update_uvd_dpm(hwmgr, false); } @@ -211,14 +211,14 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) } else { cz_dpm_powerup_vce(hwmgr); cz_hwmgr->vce_power_gated = false; - cgs_set_clockgating_state( - hwmgr->device, - AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); cgs_set_powergating_state( hwmgr->device, AMD_IP_BLOCK_TYPE_VCE, AMD_CG_STATE_UNGATE); + cgs_set_clockgating_state( + hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, true); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 9bf622e123b6..8cc0df9b534a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1167,9 +1167,9 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, cz_ps->action = cz_current_ps->action; - if ((force_high == false) && (cz_ps->action == FORCE_HIGH)) + if (!force_high && (cz_ps->action == FORCE_HIGH)) cz_ps->action = CANCEL_FORCE_HIGH; - else if ((force_high == true) && (cz_ps->action != FORCE_HIGH)) + else if (force_high && (cz_ps->action != FORCE_HIGH)) cz_ps->action = FORCE_HIGH; else cz_ps->action = DO_NOTHING; @@ -1656,7 +1656,7 @@ static void cz_hw_print_display_cfg( struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend); uint32_t data = 0; - if (hw_data->cc6_settings.cc6_setting_changed == true) { + if (hw_data->cc6_settings.cc6_setting_changed) { hw_data->cc6_settings.cc6_setting_changed = false; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c index e1b649bd5344..5afe82068b29 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c @@ -56,7 +56,7 @@ int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) fiji_update_uvd_dpm(hwmgr, false); cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); + AMD_CG_STATE_UNGATE); } return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index 744aa886a2be..120a9e2c3152 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c @@ -698,7 +698,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) if (0 == result) { struct cgs_system_info sys_info = {0}; - data->is_tlu_enabled = 0; + data->is_tlu_enabled = false; hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = FIJI_MAX_HARDWARE_POWERLEVELS; 
hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; @@ -1450,7 +1450,7 @@ static void fiji_setup_pcie_table_entry( { dpm_table->dpm_levels[index].value = pcie_gen; dpm_table->dpm_levels[index].param1 = pcie_lanes; - dpm_table->dpm_levels[index].enabled = 1; + dpm_table->dpm_levels[index].enabled = true; } static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr) @@ -1662,7 +1662,6 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr, { uint32_t count; uint8_t index; - int result = 0; struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -1684,7 +1683,7 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr, VOLTAGE_SCALE)) / 25); } - return result; + return 0; } /** @@ -4389,8 +4388,9 @@ static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->sclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled",); + PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled", + ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_FreezeLevel), "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!", @@ -4400,8 +4400,9 @@ static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled",); + PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled", + ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_FreezeLevel), "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!", @@ -4571,7 +4572,6 @@ static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr, static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct fiji_power_state *fiji_ps) { - int result = 0; struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); uint32_t high_limit_count; @@ -4591,7 +4591,7 @@ static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr, fiji_ps->performance_levels[0].memory_clock, fiji_ps->performance_levels[high_limit_count].memory_clock); - return result; + return 0; } static int fiji_generate_dpm_level_enable_mask( @@ -4850,8 +4850,9 @@ static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled",); + PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled", + ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!", @@ -4861,8 +4862,9 @@ static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM when DPM is disabled",); + PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled", + ); PP_ASSERT_WITH_CODE(0 == 
smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!", diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c index 7a705cee0cc2..a6abe81bc843 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c @@ -59,8 +59,8 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr, struct phm_runtime_table_header *rt_table, void *input, void *output) { - int result = 0; - void *temp_storage = NULL; + int result; + void *temp_storage; if (hwmgr == NULL || rt_table == NULL) { printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n"); @@ -73,12 +73,13 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr, printk(KERN_ERR "[ powerplay ] Could not allocate table temporary storage\n"); return -ENOMEM; } + } else { + temp_storage = NULL; } result = phm_run_table(hwmgr, rt_table, input, output, temp_storage); - if (NULL != temp_storage) - kfree(temp_storage); + kfree(temp_storage); return result; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 03b6128ebc20..27e07624ac28 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -534,7 +534,7 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr /* initialize vddc_dep_on_dal_pwrl table */ table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record); - table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL); + table_clk_vlt = kzalloc(table_size, GFP_KERNEL); if (NULL == table_clk_vlt) { printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! 
\n"); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c index aeec25c66aa8..b5edb5105986 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c @@ -116,7 +116,7 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) polaris10_update_uvd_dpm(hwmgr, false); cgs_set_clockgating_state(hwmgr->device, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); + AMD_CG_STATE_UNGATE); } return 0; @@ -131,11 +131,19 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) data->vce_power_gated = bgate; - if (bgate) + if (bgate) { + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_GATE); + polaris10_update_vce_dpm(hwmgr, true); polaris10_phm_powerdown_vce(hwmgr); - else + } else { polaris10_phm_powerup_vce(hwmgr); - + polaris10_update_vce_dpm(hwmgr, false); + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_VCE, + AMD_CG_STATE_UNGATE); + } return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index d1b528b401e9..769636a0c5b5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c @@ -1473,7 +1473,7 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, /* Get MinVoltage and Frequency from DPM0, * already converted to SMC_UL */ - sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value; + sclk_frequency = data->vbios_boot_state.sclk_bootup_value; result = polaris10_get_dependency_volt_by_clk(hwmgr, table_info->vdd_dep_on_sclk, sclk_frequency, @@ -1509,8 +1509,7 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr, /* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */ - table->MemoryACPILevel.MclkFrequency = - data->dpm_table.mclk_table.dpm_levels[0].value; + table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value; result = polaris10_get_dependency_volt_by_clk(hwmgr, table_info->vdd_dep_on_mclk, table->MemoryACPILevel.MclkFrequency, @@ -2669,6 +2668,10 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to enable deep sleep master switch!", result = tmp_result); + tmp_result = polaris10_enable_didt_config(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to enable DiDt config!", result = tmp_result); + tmp_result = polaris10_start_dpm(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to start DPM!", result = tmp_result); @@ -2808,13 +2811,13 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_DynamicUVDState); /* power tune caps Assume disabled */ - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping); - phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); if (hwmgr->powercontainment_enabled) @@ -3258,7 +3261,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr
*hwmgr) if (0 == result) { struct cgs_system_info sys_info = {0}; - data->is_tlu_enabled = 0; + data->is_tlu_enabled = false; hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = POLARIS10_MAX_HARDWARE_POWERLEVELS; @@ -3641,6 +3644,7 @@ static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); + disable_mclk_switching = (1 < info.display_count) || disable_mclk_switching_for_frame_lock; @@ -4145,8 +4149,8 @@ static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->sclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_FreezeLevel), @@ -4157,8 +4161,8 @@ static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_MCLKDPM_FreezeLevel), @@ -4318,7 +4322,6 @@ static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr, static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct polaris10_power_state *polaris10_ps) { - int result = 0; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); uint32_t high_limit_count; @@ -4338,7 +4341,7 @@ static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr, polaris10_ps->performance_levels[0].memory_clock, polaris10_ps->performance_levels[high_limit_count].memory_clock); - return result; + return 0; } static int polaris10_generate_dpm_level_enable_mask( @@ -4421,25 +4424,20 @@ int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate) return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate); } -static int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) +int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate) { - const struct phm_set_power_state_input *states = - (const struct phm_set_power_state_input *)input; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - const struct polaris10_power_state *polaris10_nps = - cast_const_phw_polaris10_power_state(states->pnew_state); - const struct polaris10_power_state *polaris10_cps = - cast_const_phw_polaris10_power_state(states->pcurrent_state); - uint32_t mm_boot_level_offset, mm_boot_level_value; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); - if (polaris10_nps->vce_clks.evclk > 0 && - (polaris10_cps == NULL || polaris10_cps->vce_clks.evclk == 0)) { - - data->smc_state_table.VceBootLevel = + if (!bgate) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_StablePState)) + data->smc_state_table.VceBootLevel = (uint8_t) (table_info->mm_dep_table->count - 1); + else + data->smc_state_table.VceBootLevel = 0; mm_boot_level_offset = data->dpm_table_start + offsetof(SMU74_Discrete_DpmTable, VceBootLevel); @@ -4452,18 +4450,14 @@ static int 
polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_VCEDPM_SetEnabledMask, (uint32_t)1 << data->smc_state_table.VceBootLevel); - - polaris10_enable_disable_vce_dpm(hwmgr, true); - } else if (polaris10_nps->vce_clks.evclk == 0 && - polaris10_cps != NULL && - polaris10_cps->vce_clks.evclk > 0) - polaris10_enable_disable_vce_dpm(hwmgr, false); } + polaris10_enable_disable_vce_dpm(hwmgr, !bgate); + return 0; } @@ -4548,8 +4542,8 @@ static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), @@ -4560,8 +4554,8 @@ static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel), @@ -4617,6 +4611,8 @@ static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr) return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 
0 : -EINVAL; } + + static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) { int tmp_result, result = 0; @@ -4650,11 +4646,6 @@ static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *i "Failed to generate DPM level enabled mask!", result = tmp_result); - tmp_result = polaris10_update_vce_dpm(hwmgr, input); - PP_ASSERT_WITH_CODE((0 == tmp_result), - "Failed to update VCE DPM!", - result = tmp_result); - tmp_result = polaris10_update_sclk_threshold(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update SCLK threshold!", @@ -4725,6 +4716,7 @@ int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwm if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ polaris10_notify_smc_display_change(hwmgr, false); + return 0; } @@ -4774,6 +4766,7 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr) cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h index dbc6d9bfd5af..33c33947e827 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h @@ -352,6 +352,6 @@ int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr); int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate); int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); - +int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate); #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c index 5620e268b553..b9cb240a135d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c @@ -28,10 +28,360 @@ #include "polaris10_smumgr.h" #include "smu74_discrete.h" #include "pp_debug.h" +#include "gca/gfx_8_0_d.h" +#include "gca/gfx_8_0_sh_mask.h" +#include "oss/oss_3_0_sh_mask.h" #define VOLTAGE_SCALE 4 #define POWERTUNE_DEFAULT_SET_MAX 1 +uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK; + +struct polaris10_pt_config_reg GCCACConfig_Polaris10[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, 
POLARIS10_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { 0xFFFFFFFF } +}; + +struct polaris10_pt_config_reg GCCACConfig_Polaris11[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, POLARIS10_CONFIGREG_GC_CAC_IND }, + { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, POLARIS10_CONFIGREG_GC_CAC_IND }, + + { 0xFFFFFFFF } +}; + +struct polaris10_pt_config_reg DIDTConfig_Polaris10[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, 
DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, 
POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL2, 
DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, 
DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, 
POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { 0xFFFFFFFF } +}; + +struct polaris10_pt_config_reg DIDTConfig_Polaris11[] = { +/* --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + * Offset Mask Shift Value Type + * --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + */ + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { 
ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, 
DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, 
POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, 
DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND }, + { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND }, + { 0xFFFFFFFF } +}; + static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = { /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt, * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */ @@ -209,6 +559,187 @@ static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr) return 0; } +static int 
polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable) +{ + + uint32_t en = enable ? 1 : 0; + int32_t result = 0; + uint32_t data; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0); + data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data); + DIDTBlock_Info &= ~SQ_Enable_MASK; + DIDTBlock_Info |= en << SQ_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0); + data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data); + DIDTBlock_Info &= ~DB_Enable_MASK; + DIDTBlock_Info |= en << DB_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0); + data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data); + DIDTBlock_Info &= ~TD_Enable_MASK; + DIDTBlock_Info |= en << TD_Enable_SHIFT; + } + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0); + data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; + data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data); + DIDTBlock_Info &= ~TCP_Enable_MASK; + DIDTBlock_Info |= en << TCP_Enable_SHIFT; + } + + if (enable) + result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, DIDTBlock_Info); + + return result; +} + +static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr, + struct polaris10_pt_config_reg *cac_config_regs) +{ + struct polaris10_pt_config_reg *config_regs = cac_config_regs; + uint32_t cache = 0; + uint32_t data = 0; + + PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL); + + while (config_regs->offset != 0xFFFFFFFF) { + if (config_regs->type == POLARIS10_CONFIGREG_CACHE) + cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); + else { + switch (config_regs->type) { + case POLARIS10_CONFIGREG_SMC_IND: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset); + break; + + case POLARIS10_CONFIGREG_DIDT_IND: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset); + break; + + case POLARIS10_CONFIGREG_GC_CAC_IND: + data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset); + break; + + default: + data = cgs_read_register(hwmgr->device, config_regs->offset); + break; + } + + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + data |= cache; + + switch (config_regs->type) { + case POLARIS10_CONFIGREG_SMC_IND: + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data); 
+ break; + + case POLARIS10_CONFIGREG_DIDT_IND: + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data); + break; + + case POLARIS10_CONFIGREG_GC_CAC_IND: + cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data); + break; + + default: + cgs_write_register(hwmgr->device, config_regs->offset, data); + break; + } + cache = 0; + } + + config_regs++; + } + + return 0; +} + +int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr) +{ + int result; + uint32_t num_se = 0; + uint32_t count, value, value2; + struct cgs_system_info sys_info = {0}; + + sys_info.size = sizeof(struct cgs_system_info); + sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO; + result = cgs_query_system_info(hwmgr->device, &sys_info); + + + if (result == 0) + num_se = sys_info.value; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + + /* TO DO Pre DIDT disable clock gating */ + value = 0; + value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX); + for (count = 0; count < num_se; count++) { + value = SYS_GRBM_GFX_INDEX_DATA__INSTANCE_BROADCAST_WRITES_MASK + | SYS_GRBM_GFX_INDEX_DATA__SH_BROADCAST_WRITES_MASK + | (count << SYS_GRBM_GFX_INDEX_DATA__SE_INDEX__SHIFT); + cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value); + + if (hwmgr->chip_id == CHIP_POLARIS10) { + result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + } else if (hwmgr->chip_id == CHIP_POLARIS11) { + result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11); + PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result); + } + } + cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2); + + result = polaris10_enable_didt(hwmgr, true); + PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result); + + /* TO DO Post DIDT enable clock gating */ + } + + return 0; +} + +int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr) +{ + int result; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) || + phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) { + /* TO DO Pre DIDT disable clock gating */ + + result = polaris10_enable_didt(hwmgr, false); + PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock gating failed.", return result); + /* TO DO Post DIDT enable clock gating */ + } + + return 0; +} + + static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h 
b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h index d492d6d28867..bc78e28f010d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h @@ -27,6 +27,7 @@ enum polaris10_pt_config_reg_type { POLARIS10_CONFIGREG_MMR = 0, POLARIS10_CONFIGREG_SMC_IND, POLARIS10_CONFIGREG_DIDT_IND, + POLARIS10_CONFIGREG_GC_CAC_IND, POLARIS10_CONFIGREG_CACHE, POLARIS10_CONFIGREG_MAX }; @@ -49,6 +50,14 @@ enum polaris10_pt_config_reg_type { #define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 #define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 +#define ixGC_CAC_CNTL 0x0000 +#define ixDIDT_SQ_STALL_CTRL 0x0004 +#define ixDIDT_SQ_TUNING_CTRL 0x0005 +#define ixDIDT_TD_STALL_CTRL 0x0044 +#define ixDIDT_TD_TUNING_CTRL 0x0045 +#define ixDIDT_TCP_STALL_CTRL 0x0064 +#define ixDIDT_TCP_TUNING_CTRL 0x0065 + struct polaris10_pt_config_reg { uint32_t offset; uint32_t mask; @@ -80,6 +89,6 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr); int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr); int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr); - +int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr); #endif /* POLARIS10_POWERTUNE_H */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c index a3c38bbd1e94..1944d289f846 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c @@ -66,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) int result; struct cgs_system_info info = {0}; - if( 0 != acpi_atcs_notify_pcie_device_ready(device)) + if (acpi_atcs_notify_pcie_device_ready(device)) return -EINVAL; info.size = sizeof(struct cgs_system_info); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index e2aece361eb0..26f3e30d0fef 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -1289,9 +1289,9 @@ int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, int result; memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = - cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK); + memory_clock & SET_CLOCK_FREQ_MASK; memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = - cpu_to_le32(ADJUST_MC_SETTING_PARAM); + ADJUST_MC_SETTING_PARAM; memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level; result = cgs_atom_exec_cmd_table diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h index 009bd5963ed8..8f50a038396c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h @@ -50,55 +50,45 @@ typedef union _fInt { * Function Declarations * ------------------------------------------------------------------------------- */ -fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */ -fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */ -fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */ -int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. 
last 3 digits are the decimal digits) */ - -fInt fNegate(fInt); /* Returns -1 * input fInt value */ -fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */ -fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */ -fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */ -fInt fDivide (fInt A, fInt B); /* Returns A/B */ -fInt fGetSquare(fInt); /* Returns the square of a fInt number */ -fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */ - -int uAbs(int); /* Returns the Absolute value of the Int */ -fInt fAbs(fInt); /* Returns the Absolute value of the fInt */ -int uPow(int base, int exponent); /* Returns base^exponent an INT */ - -void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */ -bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */ -bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */ - -fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */ -fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */ +static fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */ +static fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */ +static fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */ +static int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */ + +static fInt fNegate(fInt); /* Returns -1 * input fInt value */ +static fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */ +static fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */ +static fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */ +static fInt fDivide (fInt A, fInt B); /* Returns A/B */ +static fInt fGetSquare(fInt); /* Returns the square of a fInt number */ +static fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */ + +static int uAbs(int); /* Returns the Absolute value of the Int */ +static int uPow(int base, int exponent); /* Returns base^exponent an INT */ + +static void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */ +static bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */ +static bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */ + +static fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */ +static fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */ /* Fuse decoding functions * ------------------------------------------------------------------------------------- */ -fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength); -fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength); -fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength); +static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength); +static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength); +static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength); /* Internal Support Functions - Use these ONLY for testing or adding to internal functions * ------------------------------------------------------------------------------------- * 
Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons. */ -fInt Add (int, int); /* Add two INTs and return Sum as FINT */ -fInt Multiply (int, int); /* Multiply two INTs and return Product as FINT */ -fInt Divide (int, int); /* You get the idea... */ -fInt fNegate(fInt); +static fInt Divide (int, int); /* Divide two INTs and return result as FINT */ +static fInt fNegate(fInt); -int uGetScaledDecimal (fInt); /* Internal function */ -int GetReal (fInt A); /* Internal function */ - -/* Future Additions and Incomplete Functions - * ------------------------------------------------------------------------------------- - */ -int GetRoundedValue(fInt); /* Incomplete function - Useful only when Precision is lacking */ - /* Let us say we have 2.126 but can only handle 2 decimal points. We could */ - /* either chop of 6 and keep 2.12 or use this function to get 2.13, which is more accurate */ +static int uGetScaledDecimal (fInt); /* Internal function */ +static int GetReal (fInt A); /* Internal function */ /* ------------------------------------------------------------------------------------- * TROUBLESHOOTING INFORMATION @@ -115,7 +105,7 @@ int GetRoundedValue(fInt); /* Incomplete function - Usef * START OF CODE * ------------------------------------------------------------------------------------- */ -fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/ +static fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/ { uint32_t i; bool bNegated = false; @@ -154,7 +144,7 @@ fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/ return solution; } -fInt fNaturalLog(fInt value) +static fInt fNaturalLog(fInt value) { uint32_t i; fInt upper_bound = Divide(8, 1000); @@ -179,7 +169,7 @@ fInt fNaturalLog(fInt value) return (fAdd(solution, error_term)); } -fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength) +static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength) { fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); @@ -194,7 +184,7 @@ fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t b } -fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength) +static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength) { fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); @@ -212,7 +202,7 @@ fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint return f_decoded_value; } -fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength) +static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength) { fInt fLeakage; fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); @@ -225,7 +215,7 @@ fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, return fLeakage; } -fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */ +static fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */ { fInt temp; @@ -237,13 +227,13 @@ fInt ConvertToFraction(int X) /*Add all range checking here. 
Is it possible to m return temp; } -fInt fNegate(fInt X) +static fInt fNegate(fInt X) { fInt CONSTANT_NEGONE = ConvertToFraction(-1); return (fMultiply(X, CONSTANT_NEGONE)); } -fInt Convert_ULONG_ToFraction(uint32_t X) +static fInt Convert_ULONG_ToFraction(uint32_t X) { fInt temp; @@ -255,7 +245,7 @@ fInt Convert_ULONG_ToFraction(uint32_t X) return temp; } -fInt GetScaledFraction(int X, int factor) +static fInt GetScaledFraction(int X, int factor) { int times_shifted, factor_shifted; bool bNEGATED; @@ -304,7 +294,7 @@ fInt GetScaledFraction(int X, int factor) } /* Addition using two fInts */ -fInt fAdd (fInt X, fInt Y) +static fInt fAdd (fInt X, fInt Y) { fInt Sum; @@ -314,7 +304,7 @@ fInt fAdd (fInt X, fInt Y) } /* Addition using two fInts */ -fInt fSubtract (fInt X, fInt Y) +static fInt fSubtract (fInt X, fInt Y) { fInt Difference; @@ -323,7 +313,7 @@ fInt fSubtract (fInt X, fInt Y) return Difference; } -bool Equal(fInt A, fInt B) +static bool Equal(fInt A, fInt B) { if (A.full == B.full) return true; @@ -331,7 +321,7 @@ bool Equal(fInt A, fInt B) return false; } -bool GreaterThan(fInt A, fInt B) +static bool GreaterThan(fInt A, fInt B) { if (A.full > B.full) return true; @@ -339,7 +329,7 @@ bool GreaterThan(fInt A, fInt B) return false; } -fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */ +static fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */ { fInt Product; int64_t tempProduct; @@ -363,7 +353,7 @@ fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */ return Product; } -fInt fDivide (fInt X, fInt Y) +static fInt fDivide (fInt X, fInt Y) { fInt fZERO, fQuotient; int64_t longlongX, longlongY; @@ -384,7 +374,7 @@ fInt fDivide (fInt X, fInt Y) return fQuotient; } -int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/ +static int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/ { fInt fullNumber, scaledDecimal, scaledReal; @@ -397,13 +387,13 @@ int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to ch return fullNumber.full; } -fInt fGetSquare(fInt A) +static fInt fGetSquare(fInt A) { return fMultiply(A,A); } /* x_new = x_old - (x_old^2 - C) / (2 * x_old) */ -fInt fSqrt(fInt num) +static fInt fSqrt(fInt num) { fInt F_divide_Fprime, Fprime; fInt test; @@ -460,7 +450,7 @@ fInt fSqrt(fInt num) return (x_new); } -void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) +static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) { fInt *pRoots = &Roots[0]; fInt temp, root_first, root_second; @@ -498,52 +488,13 @@ void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) * ----------------------------------------------------------------------------- */ -/* Addition using two normal ints - Temporary - Use only for testing purposes?. 
*/ -fInt Add (int X, int Y) -{ - fInt A, B, Sum; - - A.full = (X << SHIFT_AMOUNT); - B.full = (Y << SHIFT_AMOUNT); - - Sum.full = A.full + B.full; - - return Sum; -} - /* Conversion Functions */ -int GetReal (fInt A) +static int GetReal (fInt A) { return (A.full >> SHIFT_AMOUNT); } -/* Temporarily Disabled */ -int GetRoundedValue(fInt A) /*For now, round the 3rd decimal place */ -{ - /* ROUNDING TEMPORARLY DISABLED - int temp = A.full; - int decimal_cutoff, decimal_mask = 0x000001FF; - decimal_cutoff = temp & decimal_mask; - if (decimal_cutoff > 0x147) { - temp += 673; - }*/ - - return ConvertBackToInteger(A)/10000; /*Temporary - in case this was used somewhere else */ -} - -fInt Multiply (int X, int Y) -{ - fInt A, B, Product; - - A.full = X << SHIFT_AMOUNT; - B.full = Y << SHIFT_AMOUNT; - - Product = fMultiply(A, B); - - return Product; -} - -fInt Divide (int X, int Y) +static fInt Divide (int X, int Y) { fInt A, B, Quotient; @@ -555,7 +506,7 @@ fInt Divide (int X, int Y) return Quotient; } -int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */ +static int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */ { int dec[PRECISION]; int i, scaledDecimal = 0, tmp = A.partial.decimal; @@ -570,7 +521,7 @@ int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole intege return scaledDecimal; } -int uPow(int base, int power) +static int uPow(int base, int power) { if (power == 0) return 1; @@ -578,15 +529,7 @@ int uPow(int base, int power) return (base)*uPow(base, power - 1); } -fInt fAbs(fInt A) -{ - if (A.partial.real < 0) - return (fMultiply(A, ConvertToFraction(-1))); - else - return A; -} - -int uAbs(int X) +static int uAbs(int X) { if (X < 0) return (X * -1); @@ -594,7 +537,7 @@ int uAbs(int X) return X; } -fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term) +static fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term) { fInt solution; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 072b3b682637..c7dc111221c2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c @@ -571,7 +571,7 @@ int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if (0 == data->sclk_dpm_key_disabled) { /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ PP_ASSERT_WITH_CODE( - (0 == tonga_is_dpm_running(hwmgr)), + !tonga_is_dpm_running(hwmgr), "Trying to Disable SCLK DPM when DPM is disabled", return -1 ); @@ -587,7 +587,7 @@ int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if (0 == data->mclk_dpm_key_disabled) { /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ PP_ASSERT_WITH_CODE( - (0 == tonga_is_dpm_running(hwmgr)), + !tonga_is_dpm_running(hwmgr), "Trying to Disable MCLK DPM when DPM is disabled", return -1 ); @@ -614,7 +614,7 @@ int tonga_stop_dpm(struct pp_hwmgr *hwmgr) if (0 == data->pcie_dpm_key_disabled) { /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ PP_ASSERT_WITH_CODE( - (0 == tonga_is_dpm_running(hwmgr)), + !tonga_is_dpm_running(hwmgr), "Trying to Disable PCIE DPM when DPM is disabled", return -1 ); @@ -630,7 +630,7 @@ int tonga_stop_dpm(struct pp_hwmgr *hwmgr) /* Checking if DPM is running. 
If we discover hang because of this, we should skip this message.*/ PP_ASSERT_WITH_CODE( - (0 == tonga_is_dpm_running(hwmgr)), + !tonga_is_dpm_running(hwmgr), "Trying to Disable Voltage CNTL when DPM is disabled", return -1 ); @@ -688,8 +688,9 @@ int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n) uint32_t level_mask = 1 << n; /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ - PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), - "Trying to force SCLK when DPM is disabled", return -1;); + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to force SCLK when DPM is disabled", + return -1;); if (0 == data->sclk_dpm_key_disabled) return (0 == smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, @@ -712,8 +713,9 @@ int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n) uint32_t level_mask = 1 << n; /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ - PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), - "Trying to Force MCLK when DPM is disabled", return -1;); + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to Force MCLK when DPM is disabled", + return -1;); if (0 == data->mclk_dpm_key_disabled) return (0 == smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, @@ -735,8 +737,9 @@ int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n) tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), - "Trying to Force PCIE level when DPM is disabled", return -1;); + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to Force PCIE level when DPM is disabled", + return -1;); if (0 == data->pcie_dpm_key_disabled) return (0 == smum_send_msg_to_smc_with_parameter( hwmgr->smumgr, @@ -774,7 +777,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr) uint32_t tmp; int result; - bool error = 0; + bool error = false; result = tonga_read_smc_sram_dword(hwmgr->smumgr, SMU72_FIRMWARE_HEADER_LOCATION + @@ -933,11 +936,11 @@ int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr) { tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - data->uvd_power_gated = 0; - data->vce_power_gated = 0; - data->samu_power_gated = 0; - data->acp_power_gated = 0; - data->pg_acp_init = 1; + data->uvd_power_gated = false; + data->vce_power_gated = false; + data->samu_power_gated = false; + data->acp_power_gated = false; + data->pg_acp_init = true; return 0; } @@ -955,7 +958,7 @@ int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr) * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM, * whereas voltage control is a fundemental change that will not be disabled */ - return (0 == tonga_is_dpm_running(hwmgr) ? 0 : 1); + return (!tonga_is_dpm_running(hwmgr) ? 
0 : 1); } /** @@ -968,7 +971,7 @@ int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr) { tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); - if (0 != tonga_is_dpm_running(hwmgr)) { + if (tonga_is_dpm_running(hwmgr)) { /* If HW Virtualization is enabled, dpm_table_start will not have a valid value */ if (!data->dpm_table_start) { return 1; @@ -991,7 +994,7 @@ static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr, { uint32_t table_size, i, j; uint16_t vvalue; - bool bVoltageFound = 0; + bool bVoltageFound = false; pp_atomctrl_voltage_table *table; PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;); @@ -1007,11 +1010,11 @@ static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr, for (i = 0; i < voltage_table->count; i++) { vvalue = voltage_table->entries[i].value; - bVoltageFound = 0; + bVoltageFound = false; for (j = 0; j < table->count; j++) { if (vvalue == table->entries[j].value) { - bVoltageFound = 1; + bVoltageFound = true; break; } } @@ -1331,7 +1334,6 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr, { uint32_t count; uint8_t index; - int result = 0; tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table; @@ -1378,7 +1380,7 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr, } } - return result; + return 0; } @@ -2042,7 +2044,7 @@ static int tonga_populate_single_memory_level( if ((data->mclk_stutter_mode_threshold != 0) && (memory_clock <= data->mclk_stutter_mode_threshold) && - (data->is_uvd_enabled == 0) + (!data->is_uvd_enabled) && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1) && (data->display_timing.num_existing_displays <= 2) && (data->display_timing.num_existing_displays != 0)) @@ -2705,7 +2707,7 @@ static int tonga_reset_single_dpm_table( dpm_table->count = count; for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) { - dpm_table->dpm_levels[i].enabled = 0; + dpm_table->dpm_levels[i].enabled = false; } return 0; @@ -2718,7 +2720,7 @@ static void tonga_setup_pcie_table_entry( { dpm_table->dpm_levels[index].value = pcie_gen; dpm_table->dpm_levels[index].param1 = pcie_lanes; - dpm_table->dpm_levels[index].enabled = 1; + dpm_table->dpm_levels[index].enabled = true; } static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr) @@ -2828,7 +2830,7 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) allowed_vdd_sclk_table->entries[i].clk) { data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = allowed_vdd_sclk_table->entries[i].clk; - data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */ + data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = true; /*(i==0) ? 1 : 0; to do */ data->dpm_table.sclk_table.count++; } } @@ -2842,7 +2844,7 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) allowed_vdd_mclk_table->entries[i].clk) { data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = allowed_vdd_mclk_table->entries[i].clk; - data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */ + data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = true; /*(i==0) ? 
1 : 0; */ data->dpm_table.mclk_table.count++; } } @@ -3134,7 +3136,7 @@ int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) if (0 == data->sclk_dpm_key_disabled) { /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - if (0 != tonga_is_dpm_running(hwmgr)) + if (tonga_is_dpm_running(hwmgr)) printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n"); if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { @@ -3149,7 +3151,7 @@ int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr) if (0 == data->mclk_dpm_key_disabled) { /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ - if (0 != tonga_is_dpm_running(hwmgr)) + if (tonga_is_dpm_running(hwmgr)) printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n"); if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) { @@ -3260,7 +3262,7 @@ int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwm /* initialize vddc_dep_on_dal_pwrl table */ table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record); - table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL); + table_clk_vlt = kzalloc(table_size, GFP_KERNEL); if (NULL == table_clk_vlt) { printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n"); @@ -3335,9 +3337,9 @@ int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr) tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); int result = 1; - PP_ASSERT_WITH_CODE (0 == tonga_is_dpm_running(hwmgr), - "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.", - return result); + PP_ASSERT_WITH_CODE (!tonga_is_dpm_running(hwmgr), + "Trying to Unforce DPM when DPM is disabled. 
Returning without sending SMC message.", + return result); if (0 == data->pcie_dpm_key_disabled) { PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( @@ -3741,7 +3743,7 @@ uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr) bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg) { - bool result = 1; + bool result = true; switch (inReg) { case mmMC_SEQ_RAS_TIMING: @@ -3825,7 +3827,7 @@ bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg) break; default: - result = 0; + result = false; break; } @@ -4449,7 +4451,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) hwmgr->backend = data; - data->dll_defaule_on = 0; + data->dll_defaule_on = false; data->sram_end = SMC_RAM_END; data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT; @@ -4555,13 +4557,13 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) /* ULV Support*/ ulv = &(data->ulv); - ulv->ulv_supported = 0; + ulv->ulv_supported = false; /* Initalize Dynamic State Adjustment Rule Settings*/ result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr); if (result) printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n"); - data->uvd_enabled = 0; + data->uvd_enabled = false; table = &(data->smc_state_table); @@ -4608,7 +4610,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SMU7); - data->vddc_phase_shed_control = 0; + data->vddc_phase_shed_control = false; phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating); @@ -4627,7 +4629,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) } if (0 == result) { - data->is_tlu_enabled = 0; + data->is_tlu_enabled = false; hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = TONGA_MAX_HARDWARE_POWERLEVELS; hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; @@ -5308,9 +5310,8 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->sclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE( - 0 == tonga_is_dpm_running(hwmgr), - "Trying to freeze SCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to freeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( 0 == smum_send_msg_to_smc(hwmgr->smumgr, @@ -5322,8 +5323,8 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), - "Trying to freeze MCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to freeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( 0 == smum_send_msg_to_smc(hwmgr->smumgr, @@ -5458,7 +5459,6 @@ static int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr, static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state) { - int result = 0; struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); uint32_t high_limit_count; @@ -5478,7 +5478,7 @@ static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_powe hw_state->performance_levels[0].memory_clock, hw_state->performance_levels[high_limit_count].memory_clock); - return result; + return 0; } static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input) @@ -5625,8 +5625,8 @@ static int 
tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { - PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), - "Trying to Unfreeze SCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to Unfreeze SCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( 0 == smum_send_msg_to_smc(hwmgr->smumgr, @@ -5638,9 +5638,8 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) if ((0 == data->mclk_dpm_key_disabled) && (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { - PP_ASSERT_WITH_CODE( - 0 == tonga_is_dpm_running(hwmgr), - "Trying to Unfreeze MCLK DPM when DPM is disabled", + PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr), + "Trying to Unfreeze MCLK DPM when DPM is disabled", ); PP_ASSERT_WITH_CODE( 0 == smum_send_msg_to_smc(hwmgr->smumgr, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c index 4662d3d0b7b1..cfb647f76cbe 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c @@ -167,8 +167,7 @@ static int get_vddc_lookup_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels; - table = (phm_ppt_v1_voltage_lookup_table *) - kzalloc(table_size, GFP_KERNEL); + table = kzalloc(table_size, GFP_KERNEL); if (NULL == table) return -ENOMEM; @@ -327,7 +326,7 @@ static int get_valid_clk( table_size = sizeof(uint32_t) + sizeof(uint32_t) * clk_volt_pp_table->count; - table = (struct phm_clock_array *)kzalloc(table_size, GFP_KERNEL); + table = kzalloc(table_size, GFP_KERNEL); if (NULL == table) return -ENOMEM; @@ -377,8 +376,7 @@ static int get_mclk_voltage_dependency_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * mclk_dep_table->ucNumEntries; - mclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) - kzalloc(table_size, GFP_KERNEL); + mclk_table = kzalloc(table_size, GFP_KERNEL); if (NULL == mclk_table) return -ENOMEM; @@ -424,8 +422,7 @@ static int get_sclk_voltage_dependency_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * tonga_table->ucNumEntries; - sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) - kzalloc(table_size, GFP_KERNEL); + sclk_table = kzalloc(table_size, GFP_KERNEL); if (NULL == sclk_table) return -ENOMEM; @@ -456,8 +453,7 @@ static int get_sclk_voltage_dependency_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) * polaris_table->ucNumEntries; - sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) - kzalloc(table_size, GFP_KERNEL); + sclk_table = kzalloc(table_size, GFP_KERNEL); if (NULL == sclk_table) return -ENOMEM; @@ -504,7 +500,7 @@ static int get_pcie_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries; - pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL); + pcie_table = kzalloc(table_size, GFP_KERNEL); if (pcie_table == NULL) return -ENOMEM; @@ -541,7 +537,7 @@ static int get_pcie_table( table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries; - pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL); + pcie_table = kzalloc(table_size, GFP_KERNEL); if (pcie_table == NULL) return -ENOMEM; @@ -695,8 +691,7 @@ static int get_mm_clock_voltage_table( table_size = 
sizeof(uint32_t) + sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record) * mm_dependency_table->ucNumEntries; - mm_table = (phm_ppt_v1_mm_clock_voltage_dependency_table *) - kzalloc(table_size, GFP_KERNEL); + mm_table = kzalloc(table_size, GFP_KERNEL); if (NULL == mm_table) return -ENOMEM; @@ -1073,7 +1068,6 @@ int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr) int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) { - int result = 0; struct phm_ppt_v1_information *pp_table_information = (struct phm_ppt_v1_information *)(hwmgr->pptable); @@ -1113,7 +1107,7 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) kfree(hwmgr->pptable); hwmgr->pptable = NULL; - return result; + return 0; } const struct pp_table_func tonga_pptable_funcs = { diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index b22722eabafc..f42c536b3af1 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -479,7 +479,6 @@ static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr) struct tonga_smumgr *tonga_smu = (struct tonga_smumgr *)(smumgr->backend); uint16_t fw_to_load; - int result = 0; struct SMU_DRAMData_TOC *toc; /** * First time this gets called during SmuMgr init, @@ -563,7 +562,7 @@ static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr) smumgr, PPSMC_MSG_LoadUcodes, fw_to_load), "Fail to Request SMU Load uCode", return 0); - return result; + return 0; } static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr, diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 70ff09d10885..ef312bb75fda 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -393,12 +393,13 @@ void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched) s_job->s_fence->parent = NULL; } } + atomic_set(&sched->hw_rq_count, 0); spin_unlock(&sched->job_list_lock); } void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) { - struct amd_sched_job *s_job; + struct amd_sched_job *s_job, *tmp; int r; spin_lock(&sched->job_list_lock); @@ -407,9 +408,13 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) if (s_job) schedule_delayed_work(&s_job->work_tdr, sched->timeout); - list_for_each_entry(s_job, &sched->ring_mirror_list, node) { + list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { struct amd_sched_fence *s_fence = s_job->s_fence; - struct fence *fence = sched->ops->run_job(s_job); + struct fence *fence; + + spin_unlock(&sched->job_list_lock); + fence = sched->ops->run_job(s_job); + atomic_inc(&sched->hw_rq_count); if (fence) { s_fence->parent = fence_get(fence); r = fence_add_callback(fence, &s_fence->cb, @@ -424,6 +429,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) DRM_ERROR("Failed to run job!\n"); amd_sched_process_job(NULL, &s_fence->cb); } + spin_lock(&sched->job_list_lock); } spin_unlock(&sched->job_list_lock); } diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index e5b44e92f8cf..82171d223f2d 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -257,6 +257,7 @@ static int malidp_bind(struct device *dev) { struct resource *res; struct drm_device *drm; + struct device_node *ep; struct malidp_drm *malidp; struct malidp_hw_device *hwdev; struct platform_device *pdev = to_platform_device(dev); @@ -284,10 +285,8 @@ static int 
malidp_bind(struct device *dev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hwdev->regs = devm_ioremap_resource(dev, res); - if (IS_ERR(hwdev->regs)) { - DRM_ERROR("Failed to map control registers area\n"); + if (IS_ERR(hwdev->regs)) return PTR_ERR(hwdev->regs); - } hwdev->pclk = devm_clk_get(dev, "pclk"); if (IS_ERR(hwdev->pclk)) @@ -360,11 +359,14 @@ static int malidp_bind(struct device *dev) goto register_fail; /* Set the CRTC's port so that the encoder component can find it */ - malidp->crtc.port = of_graph_get_next_endpoint(dev->of_node, NULL); + ep = of_graph_get_next_endpoint(dev->of_node, NULL); + if (!ep) { + ret = -EINVAL; + goto port_fail; + } + malidp->crtc.port = of_get_next_parent(ep); ret = component_bind_all(dev, drm); - of_node_put(malidp->crtc.port); - if (ret) { DRM_ERROR("Failed to bind all components\n"); goto bind_fail; @@ -402,6 +404,9 @@ vblank_fail: irq_init_fail: component_unbind_all(dev, drm); bind_fail: + of_node_put(malidp->crtc.port); + malidp->crtc.port = NULL; +port_fail: drm_dev_unregister(drm); register_fail: malidp_de_planes_destroy(drm); @@ -435,6 +440,8 @@ static void malidp_unbind(struct device *dev) malidp_de_irq_fini(drm); drm_vblank_cleanup(drm); component_unbind_all(dev, drm); + of_node_put(malidp->crtc.port); + malidp->crtc.port = NULL; drm_dev_unregister(drm); malidp_de_planes_destroy(drm); drm_mode_config_cleanup(drm); diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 88e7fc797721..cb8f0347b934 100644 --- a/drivers/gpu/drm/armada/armada_gem.c +++ b/drivers/gpu/drm/armada/armada_gem.c @@ -231,7 +231,7 @@ struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev, obj->dev_addr = DMA_ERROR_CODE; - mapping = file_inode(obj->obj.filp)->i_mapping; + mapping = obj->obj.filp->f_mapping; mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE); DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size); @@ -441,7 +441,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, if (sg_alloc_table(sgt, count, GFP_KERNEL)) goto free_sgt; - mapping = file_inode(dobj->obj.filp)->i_mapping; + mapping = dobj->obj.filp->f_mapping; for_each_sg(sgt->sgl, sg, count, i) { struct page *page; diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c index 5cd8dd7e5904..583b8ce614e3 100644 --- a/drivers/gpu/drm/bridge/parade-ps8622.c +++ b/drivers/gpu/drm/bridge/parade-ps8622.c @@ -636,9 +636,7 @@ static int ps8622_remove(struct i2c_client *client) { struct ps8622_bridge *ps8622 = i2c_get_clientdata(client); - if (ps8622->bl) - backlight_device_unregister(ps8622->bl); - + backlight_device_unregister(ps8622->bl); drm_bridge_remove(&ps8622->bridge); return 0; diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 8f11b8741e42..eae5ef963cb7 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -860,3 +860,35 @@ void drm_dp_aux_unregister(struct drm_dp_aux *aux) i2c_del_adapter(&aux->ddc); } EXPORT_SYMBOL(drm_dp_aux_unregister); + +#define PSR_SETUP_TIME(x) [DP_PSR_SETUP_TIME_ ## x >> DP_PSR_SETUP_TIME_SHIFT] = (x) + +/** + * drm_dp_psr_setup_time() - PSR setup in time usec + * @psr_cap: PSR capabilities from DPCD + * + * Returns: + * PSR setup time for the panel in microseconds, negative + * error code on failure. 
+ */ +int drm_dp_psr_setup_time(const u8 psr_cap[EDP_PSR_RECEIVER_CAP_SIZE]) +{ + static const u16 psr_setup_time_us[] = { + PSR_SETUP_TIME(330), + PSR_SETUP_TIME(275), + PSR_SETUP_TIME(165), + PSR_SETUP_TIME(110), + PSR_SETUP_TIME(55), + PSR_SETUP_TIME(0), + }; + int i; + + i = (psr_cap[1] & DP_PSR_SETUP_TIME_MASK) >> DP_PSR_SETUP_TIME_SHIFT; + if (i >= ARRAY_SIZE(psr_setup_time_us)) + return -EINVAL; + + return psr_setup_time_us[i]; +} +EXPORT_SYMBOL(drm_dp_psr_setup_time); + +#undef PSR_SETUP_TIME diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 5c19dde1cd31..9134ae134667 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -511,7 +511,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj) int i, npages; /* This is the shared memory object that backs the GEM resource */ - mapping = file_inode(obj->filp)->i_mapping; + mapping = obj->filp->f_mapping; /* We already BUG_ON() for non-page-aligned sizes in * drm_gem_object_init(), so we should never hit this unless diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index df9bcbab922f..5ce3603e6eac 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -535,8 +535,7 @@ void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv, static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj) { - if (etnaviv_obj->vaddr) - vunmap(etnaviv_obj->vaddr); + vunmap(etnaviv_obj->vaddr); put_pages(etnaviv_obj); } @@ -660,7 +659,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev, * why this is required _and_ expected if you're * going to pin these pages. */ - mapping = file_inode(obj->filp)->i_mapping; + mapping = obj->filp->f_mapping; mapping_set_gfp_mask(mapping, GFP_HIGHUSER); } @@ -670,9 +669,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev, return obj; fail: - if (obj) - drm_gem_object_unreference_unlocked(obj); - + drm_gem_object_unreference_unlocked(obj); return ERR_PTR(ret); } @@ -916,15 +913,12 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file, get_task_struct(current); ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base); - if (ret) { - drm_gem_object_unreference_unlocked(&etnaviv_obj->base); - return ret; - } + if (ret) + goto unreference; ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle); - +unreference: /* drop reference from allocate - handle holds it now */ drm_gem_object_unreference_unlocked(&etnaviv_obj->base); - return ret; } diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c index 28f9d90988ff..563f193fcfac 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c +++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c @@ -246,8 +246,7 @@ static void cdv_hdmi_destroy(struct drm_connector *connector) { struct gma_encoder *gma_encoder = gma_attached_encoder(connector); - if (gma_encoder->i2c_bus) - psb_intel_i2c_destroy(gma_encoder->i2c_bus); + psb_intel_i2c_destroy(gma_encoder->i2c_bus); drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index 813ef23a8054..38dc89083148 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -444,8 +444,7 @@ static void cdv_intel_lvds_destroy(struct drm_connector *connector) { struct gma_encoder *gma_encoder = gma_attached_encoder(connector); - if 
(gma_encoder->i2c_bus) - psb_intel_i2c_destroy(gma_encoder->i2c_bus); + psb_intel_i2c_destroy(gma_encoder->i2c_bus); drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); @@ -780,12 +779,10 @@ out: failed_find: mutex_unlock(&dev->mode_config.mutex); printk(KERN_ERR "Failed find\n"); - if (gma_encoder->ddc_bus) - psb_intel_i2c_destroy(gma_encoder->ddc_bus); + psb_intel_i2c_destroy(gma_encoder->ddc_bus); failed_ddc: printk(KERN_ERR "Failed DDC\n"); - if (gma_encoder->i2c_bus) - psb_intel_i2c_destroy(gma_encoder->i2c_bus); + psb_intel_i2c_destroy(gma_encoder->i2c_bus); failed_blc_i2c: printk(KERN_ERR "Failed BLC\n"); drm_encoder_cleanup(encoder); diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 7440bf90ac9c..0fcdce0817de 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -184,12 +184,6 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma) return 0; } -static int psbfb_ioctl(struct fb_info *info, unsigned int cmd, - unsigned long arg) -{ - return -ENOTTY; -} - static struct fb_ops psbfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, @@ -201,7 +195,6 @@ static struct fb_ops psbfb_ops = { .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_mmap = psbfb_mmap, .fb_sync = psbfb_sync, - .fb_ioctl = psbfb_ioctl, }; static struct fb_ops psbfb_roll_ops = { @@ -215,7 +208,6 @@ static struct fb_ops psbfb_roll_ops = { .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_pan_display = psbfb_pan, .fb_mmap = psbfb_mmap, - .fb_ioctl = psbfb_ioctl, }; static struct fb_ops psbfb_unaccel_ops = { @@ -228,7 +220,6 @@ static struct fb_ops psbfb_unaccel_ops = { .fb_copyarea = drm_fb_helper_cfb_copyarea, .fb_imageblit = drm_fb_helper_cfb_imageblit, .fb_mmap = psbfb_mmap, - .fb_ioctl = psbfb_ioctl, }; /** diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 82b8ce418b27..50eb944fb78a 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -210,10 +210,8 @@ static int psb_driver_unload(struct drm_device *dev) iounmap(dev_priv->aux_reg); dev_priv->aux_reg = NULL; } - if (dev_priv->aux_pdev) - pci_dev_put(dev_priv->aux_pdev); - if (dev_priv->lpc_pdev) - pci_dev_put(dev_priv->lpc_pdev); + pci_dev_put(dev_priv->aux_pdev); + pci_dev_put(dev_priv->lpc_pdev); /* Destroy VBT data */ psb_intel_destroy_bios(dev); diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index b1b93317d054..e55733ca46d2 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -561,8 +561,7 @@ void psb_intel_lvds_destroy(struct drm_connector *connector) struct gma_encoder *gma_encoder = gma_attached_encoder(connector); struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv; - if (lvds_priv->ddc_bus) - psb_intel_i2c_destroy(lvds_priv->ddc_bus); + psb_intel_i2c_destroy(lvds_priv->ddc_bus); drm_connector_unregister(connector); drm_connector_cleanup(connector); kfree(connector); @@ -835,11 +834,9 @@ out: failed_find: mutex_unlock(&dev->mode_config.mutex); - if (lvds_priv->ddc_bus) - psb_intel_i2c_destroy(lvds_priv->ddc_bus); + psb_intel_i2c_destroy(lvds_priv->ddc_bus); failed_ddc: - if (lvds_priv->i2c_bus) - psb_intel_i2c_destroy(lvds_priv->i2c_bus); + psb_intel_i2c_destroy(lvds_priv->i2c_bus); failed_blc_i2c: drm_encoder_cleanup(encoder); drm_connector_cleanup(connector); diff --git a/drivers/gpu/drm/i915/i915_gem.c 
b/drivers/gpu/drm/i915/i915_gem.c index 7fd44980798f..11681501d7b1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -172,7 +172,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) { - struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; + struct address_space *mapping = obj->base.filp->f_mapping; char *vaddr = obj->phys_handle->vaddr; struct sg_table *st; struct scatterlist *sg; @@ -239,7 +239,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj) obj->dirty = 0; if (obj->dirty) { - struct address_space *mapping = file_inode(obj->base.filp)->i_mapping; + struct address_space *mapping = obj->base.filp->f_mapping; char *vaddr = obj->phys_handle->vaddr; int i; @@ -2398,7 +2398,7 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj) if (obj->base.filp == NULL) return; - mapping = file_inode(obj->base.filp)->i_mapping, + mapping = obj->base.filp->f_mapping, invalidate_mapping_pages(mapping, 0, (loff_t)-1); } @@ -2513,7 +2513,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) * * Fail silently without starting the shrinker */ - mapping = file_inode(obj->base.filp)->i_mapping; + mapping = obj->base.filp->f_mapping; gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); gfp |= __GFP_NORETRY | __GFP_NOWARN; sg = st->sgl; @@ -4795,7 +4795,7 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev, mask |= __GFP_DMA32; } - mapping = file_inode(obj->base.filp)->i_mapping; + mapping = obj->base.filp->f_mapping; mapping_set_gfp_mask(mapping, mask); i915_gem_object_init(obj, &i915_gem_object_ops); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 3329fc6a95f4..cc937a19b1ba 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -1730,6 +1730,8 @@ bool intel_sdvo_init(struct drm_device *dev, /* intel_sprite.c */ +int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, + int usecs); int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane); int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 68bd0bb34817..2b0d1baf15b3 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -327,6 +327,9 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = to_i915(dev); struct drm_crtc *crtc = dig_port->base.base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + const struct drm_display_mode *adjusted_mode = + &intel_crtc->config->base.adjusted_mode; + int psr_setup_time; lockdep_assert_held(&dev_priv->psr.lock); WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); @@ -365,11 +368,25 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp) } if (IS_HASWELL(dev) && - intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { + adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n"); return false; } + psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd); + if (psr_setup_time < 0) { + DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n", + intel_dp->psr_dpcd[1]); + return false; + } + + if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) > + 
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) { + DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n", + psr_setup_time); + return false; + } + dev_priv->psr.source_ok = true; return true; } diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 0de935ad01c2..7c08e4f29032 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -53,8 +53,8 @@ format_is_yuv(uint32_t format) } } -static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, - int usecs) +int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, + int usecs) { /* paranoia */ if (!adjusted_mode->crtc_htotal) @@ -91,7 +91,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc) vblank_start = DIV_ROUND_UP(vblank_start, 2); /* FIXME needs to be calibrated sensibly */ - min = vblank_start - usecs_to_scanlines(adjusted_mode, 100); + min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100); max = vblank_start - 1; local_irq_disable(); diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 5d2831dfb8b9..b03919ed60ba 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -297,15 +297,12 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder, } if (!bus_format) { - struct drm_connector_state *conn_state; struct drm_connector *connector; - int i; - for_each_connector_in_state(encoder->crtc->state->state, - connector, conn_state, i) { + drm_for_each_connector(connector, encoder->dev) { struct drm_display_info *di = &connector->display_info; - if (conn_state->crtc == encoder->crtc && + if (connector->encoder == encoder && di->num_bus_formats) { bus_format = di->bus_formats[0]; break; diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index 615cbb08ba29..13798b3e6beb 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -17,8 +17,8 @@ static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb); - if (mga_fb->obj) - drm_gem_object_unreference_unlocked(mga_fb->obj); + + drm_gem_object_unreference_unlocked(mga_fb->obj); drm_framebuffer_cleanup(fb); kfree(fb); } diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index db76b94e6e26..f2ad17aa33f0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -45,6 +45,8 @@ static struct nouveau_dsm_priv { bool dsm_detected; bool optimus_detected; + bool optimus_flags_detected; + bool optimus_skip_dsm; acpi_handle dhandle; acpi_handle rom_handle; } nouveau_dsm_priv; @@ -57,9 +59,6 @@ bool nouveau_is_v1_dsm(void) { return nouveau_dsm_priv.dsm_detected; } -#define NOUVEAU_DSM_HAS_MUX 0x1 -#define NOUVEAU_DSM_HAS_OPT 0x2 - #ifdef CONFIG_VGA_SWITCHEROO static const char nouveau_dsm_muid[] = { 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, @@ -110,7 +109,7 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t * * requirements on the fourth parameter, so a private implementation * instead of using acpi_check_dsm(). */ -static int nouveau_check_optimus_dsm(acpi_handle handle) +static int nouveau_dsm_get_optimus_functions(acpi_handle handle) { int result; @@ -125,7 +124,9 @@ static int nouveau_check_optimus_dsm(acpi_handle handle) * ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. 
* If the n-th bit is enabled, function n is supported */ - return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS); + if (result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS)) + return result; + return 0; } static int nouveau_dsm(acpi_handle handle, int func, int arg) @@ -212,26 +213,55 @@ static const struct vga_switcheroo_handler nouveau_dsm_handler = { .get_client_id = nouveau_dsm_get_client_id, }; -static int nouveau_dsm_pci_probe(struct pci_dev *pdev) +/* + * Firmware supporting Windows 8 or later do not use _DSM to put the device into + * D3cold, they instead rely on disabling power resources on the parent. + */ +static bool nouveau_pr3_present(struct pci_dev *pdev) +{ + struct pci_dev *parent_pdev = pci_upstream_bridge(pdev); + struct acpi_device *parent_adev; + + if (!parent_pdev) + return false; + + parent_adev = ACPI_COMPANION(&parent_pdev->dev); + if (!parent_adev) + return false; + + return acpi_has_method(parent_adev->handle, "_PR3"); +} + +static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out, + bool *has_mux, bool *has_opt, + bool *has_opt_flags, bool *has_pr3) { acpi_handle dhandle; - int retval = 0; + bool supports_mux; + int optimus_funcs; dhandle = ACPI_HANDLE(&pdev->dev); if (!dhandle) - return false; + return; if (!acpi_has_method(dhandle, "_DSM")) - return false; + return; + + supports_mux = acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102, + 1 << NOUVEAU_DSM_POWER); + optimus_funcs = nouveau_dsm_get_optimus_functions(dhandle); - if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102, - 1 << NOUVEAU_DSM_POWER)) - retval |= NOUVEAU_DSM_HAS_MUX; + /* Does not look like a Nvidia device. */ + if (!supports_mux && !optimus_funcs) + return; - if (nouveau_check_optimus_dsm(dhandle)) - retval |= NOUVEAU_DSM_HAS_OPT; + *dhandle_out = dhandle; + *has_mux = supports_mux; + *has_opt = !!optimus_funcs; + *has_opt_flags = optimus_funcs & (1 << NOUVEAU_DSM_OPTIMUS_FLAGS); + *has_pr3 = false; - if (retval & NOUVEAU_DSM_HAS_OPT) { + if (optimus_funcs) { uint32_t result; nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0, &result); @@ -239,11 +269,9 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev) (result & OPTIMUS_ENABLED) ? "enabled" : "disabled", (result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "", (result & OPTIMUS_HDA_CODEC_MASK) ? 
"hda bios codec supported" : ""); - } - if (retval) - nouveau_dsm_priv.dhandle = dhandle; - return retval; + *has_pr3 = nouveau_pr3_present(pdev); + } } static bool nouveau_dsm_detect(void) @@ -251,11 +279,13 @@ static bool nouveau_dsm_detect(void) char acpi_method_name[255] = { 0 }; struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; struct pci_dev *pdev = NULL; - int has_dsm = 0; - int has_optimus = 0; + acpi_handle dhandle = NULL; + bool has_mux = false; + bool has_optimus = false; + bool has_optimus_flags = false; + bool has_power_resources = false; int vga_count = 0; bool guid_valid; - int retval; bool ret = false; /* lookup the MXM GUID */ @@ -268,32 +298,32 @@ static bool nouveau_dsm_detect(void) while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { vga_count++; - retval = nouveau_dsm_pci_probe(pdev); - if (retval & NOUVEAU_DSM_HAS_MUX) - has_dsm |= 1; - if (retval & NOUVEAU_DSM_HAS_OPT) - has_optimus = 1; + nouveau_dsm_pci_probe(pdev, &dhandle, &has_mux, &has_optimus, + &has_optimus_flags, &has_power_resources); } while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pdev)) != NULL) { vga_count++; - retval = nouveau_dsm_pci_probe(pdev); - if (retval & NOUVEAU_DSM_HAS_MUX) - has_dsm |= 1; - if (retval & NOUVEAU_DSM_HAS_OPT) - has_optimus = 1; + nouveau_dsm_pci_probe(pdev, &dhandle, &has_mux, &has_optimus, + &has_optimus_flags, &has_power_resources); } /* find the optimus DSM or the old v1 DSM */ - if (has_optimus == 1) { + if (has_optimus) { + nouveau_dsm_priv.dhandle = dhandle; acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n", acpi_method_name); + if (has_power_resources) + pr_info("nouveau: detected PR support, will not use DSM\n"); nouveau_dsm_priv.optimus_detected = true; + nouveau_dsm_priv.optimus_flags_detected = has_optimus_flags; + nouveau_dsm_priv.optimus_skip_dsm = has_power_resources; ret = true; - } else if (vga_count == 2 && has_dsm && guid_valid) { + } else if (vga_count == 2 && has_mux && guid_valid) { + nouveau_dsm_priv.dhandle = dhandle; acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", @@ -321,11 +351,12 @@ void nouveau_register_dsm_handler(void) void nouveau_switcheroo_optimus_dsm(void) { u32 result = 0; - if (!nouveau_dsm_priv.optimus_detected) + if (!nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.optimus_skip_dsm) return; - nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS, - 0x3, &result); + if (nouveau_dsm_priv.optimus_flags_detected) + nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS, + 0x3, &result); nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result); diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 7d9248b8c664..da8fd5ff9d0f 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -107,11 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) ((image->dx + image->width) & 0xffff)); OUT_RING(chan, bg); OUT_RING(chan, fg); - OUT_RING(chan, (image->height << 16) | image->width); + OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8)); OUT_RING(chan, (image->height << 16) | image->width); OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); - dsize = ALIGN(image->width * image->height, 
32) >> 5; + dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; while (dsize) { int iter_len = dsize > 128 ? 128 : dsize; diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 1aeb698e9707..af3d3c49411a 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -125,7 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) OUT_RING(chan, 0); OUT_RING(chan, image->dy); - dwords = ALIGN(image->width * image->height, 32) >> 5; + dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; while (dwords) { int push = dwords > 2047 ? 2047 : dwords; diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index 839f4c8c1805..054b6a056d99 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c @@ -125,7 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) OUT_RING (chan, 0); OUT_RING (chan, image->dy); - dwords = ALIGN(image->width * image->height, 32) >> 5; + dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; while (dwords) { int push = dwords > 2047 ? 2047 : dwords; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c index 69de8c6259fe..f1e15a4d4f64 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c @@ -76,8 +76,8 @@ nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch, nvkm_wo32(chan->inst, i, 0x00040004); for (i = 0x1f18; i <= 0x3088 ; i += 16) { nvkm_wo32(chan->inst, i + 0, 0x10700ff9); - nvkm_wo32(chan->inst, i + 1, 0x0436086c); - nvkm_wo32(chan->inst, i + 2, 0x000c001b); + nvkm_wo32(chan->inst, i + 4, 0x0436086c); + nvkm_wo32(chan->inst, i + 8, 0x000c001b); } for (i = 0x30b8; i < 0x30c8; i += 4) nvkm_wo32(chan->inst, i, 0x0000ffff); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c index 2207dac23981..300f5ed5de0b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c @@ -75,8 +75,8 @@ nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch, nvkm_wo32(chan->inst, i, 0x00040004); for (i = 0x15ac; i <= 0x271c ; i += 16) { nvkm_wo32(chan->inst, i + 0, 0x10700ff9); - nvkm_wo32(chan->inst, i + 1, 0x0436086c); - nvkm_wo32(chan->inst, i + 2, 0x000c001b); + nvkm_wo32(chan->inst, i + 4, 0x0436086c); + nvkm_wo32(chan->inst, i + 8, 0x000c001b); } for (i = 0x274c; i < 0x275c; i += 4) nvkm_wo32(chan->inst, i, 0x0000ffff); diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c index 1b0cf2d8224b..0eae8afaed90 100644 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c @@ -1284,8 +1284,7 @@ static int dsicm_probe(struct platform_device *pdev) return 0; err_sysfs_create: - if (bldev != NULL) - backlight_device_unregister(bldev); + backlight_device_unregister(bldev); err_bl: destroy_workqueue(ddata->workqueue); err_reg: diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 983c8cf2441c..31f5178c22c7 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -115,8 +115,8 @@ static void omap_framebuffer_destroy(struct drm_framebuffer *fb) for (i = 0; i < n; i++) { struct plane *plane = &omap_fb->planes[i]; - if (plane->bo) - 
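/*
 * Illustrative sketch, not part of the patch: the NULL checks dropped by the
 * mgag200, omapdrm, qxl and radeon hunks in this patch rely on the release
 * helpers themselves tolerating NULL; drm_gem_object_unreference_unlocked(),
 * backlight_device_unregister(), kfree() and vfree() all return early for a
 * NULL argument.  A framebuffer destroy path can therefore be written without
 * guards (struct and helper names below are hypothetical):
 */
static void example_fb_destroy(struct drm_framebuffer *fb)
{
	struct example_framebuffer *efb = to_example_framebuffer(fb);

	drm_gem_object_unreference_unlocked(efb->obj);	/* no-op when obj is NULL */
	drm_framebuffer_cleanup(fb);
	kfree(efb);
}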
drm_gem_object_unreference_unlocked(plane->bo); + + drm_gem_object_unreference_unlocked(plane->bo); } kfree(omap_fb); diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 9b3f565fd8d7..505dee0db973 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -1396,7 +1396,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, if (ret) goto err_free; - mapping = file_inode(obj->filp)->i_mapping; + mapping = obj->filp->f_mapping; mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32); } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index ad429683fef7..3aef12742a53 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -468,8 +468,7 @@ void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); - if (qxl_fb->obj) - drm_gem_object_unreference_unlocked(qxl_fb->obj); + drm_gem_object_unreference_unlocked(qxl_fb->obj); drm_framebuffer_cleanup(fb); kfree(qxl_fb); } diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 35e0fc3ae8a7..7ba450832e6b 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -3843,7 +3843,10 @@ static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev, if (i >= sclk_table->count) { pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; } else { - /* XXX check display min clock requirements */ + /* XXX The current code always reprogrammed the sclk levels, + * but we don't currently handle disp sclk requirements + * so just skip it. + */ if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK) pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; } diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c index 59acd0e5c2c6..31c9a92d6a1b 100644 --- a/drivers/gpu/drm/radeon/radeon_acpi.c +++ b/drivers/gpu/drm/radeon/radeon_acpi.c @@ -741,13 +741,6 @@ int radeon_acpi_init(struct radeon_device *rdev) } atif->encoder_for_bl = target; - if (!target) { - /* Brightness change notification is enabled, but we - * didn't find a backlight controller, this should - * never happen. 
- */ - DRM_ERROR("Cannot find a backlight controller\n"); - } } if (atif->functions.sbios_requests && !atif->functions.system_params) { diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index f8097a0e7a79..5df3ec73021b 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -1155,7 +1155,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) le16_to_cpu(firmware_info->info.usReferenceClock); p1pll->reference_div = 0; - if (crev < 2) + if ((frev < 2) && (crev < 2)) p1pll->pll_out_min = le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); else @@ -1164,7 +1164,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) p1pll->pll_out_max = le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); - if (crev >= 4) { + if (((frev < 2) && (crev >= 4)) || (frev >= 2)) { p1pll->lcd_pll_out_min = le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; if (p1pll->lcd_pll_out_min == 0) diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 86dcdf38b732..6de342861202 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -536,7 +536,6 @@ static int radeon_atpx_get_client_id(struct pci_dev *pdev) static const struct vga_switcheroo_handler radeon_atpx_handler = { .switchto = radeon_atpx_switchto, .power_state = radeon_atpx_power_state, - .init = radeon_atpx_init, .get_client_id = radeon_atpx_get_client_id, }; @@ -572,6 +571,7 @@ static bool radeon_atpx_detect(void) printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n", acpi_method_name); radeon_atpx_priv.atpx_detected = true; + radeon_atpx_init(); return true; } return false; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 5f1cd695c965..c3206fb8f4cf 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -714,7 +714,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index) drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); radeon_crtc->crtc_id = index; - radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc"); + radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0); rdev->mode_info.crtcs[index] = radeon_crtc; if (rdev->family >= CHIP_BONAIRE) { @@ -1324,9 +1324,7 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) { struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); - if (radeon_fb->obj) { - drm_gem_object_unreference_unlocked(radeon_fb->obj); - } + drm_gem_object_unreference_unlocked(radeon_fb->obj); drm_framebuffer_cleanup(fb); kfree(radeon_fb); } diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c index bd74732ea09b..134201ecc6fd 100644 --- a/drivers/gpu/drm/sti/sti_compositor.c +++ b/drivers/gpu/drm/sti/sti_compositor.c @@ -254,12 +254,12 @@ static int sti_compositor_probe(struct platform_device *pdev) } /* Get reset resources */ - compo->rst_main = devm_reset_control_get(dev, "compo-main"); + compo->rst_main = devm_reset_control_get_shared(dev, "compo-main"); /* Take compo main out of reset */ if (!IS_ERR(compo->rst_main)) reset_control_deassert(compo->rst_main); - compo->rst_aux = devm_reset_control_get(dev, "compo-aux"); + compo->rst_aux = devm_reset_control_get_shared(dev, "compo-aux"); /* Take compo aux out of reset */ if (!IS_ERR(compo->rst_aux)) 
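/*
 * Illustrative sketch, not part of the patch: a shared reset control, as
 * requested above with devm_reset_control_get_shared(), lets several
 * consumers keep the same reset line deasserted.  reset_control_deassert()
 * only touches the hardware on the first call, the line is asserted again
 * only after every consumer has balanced its deassert with
 * reset_control_assert(), and a line already held as shared cannot also be
 * requested exclusively.  Hypothetical consumer:
 */
static int example_deassert_reset(struct device *dev, const char *name)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_shared(dev, name);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* refcounted for shared controls: deasserts only on the first call */
	return reset_control_deassert(rst);
}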
reset_control_deassert(compo->rst_aux); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 4da0e784f9e7..2df602a35f92 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -53,6 +53,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, int ret; if (old_mem->mem_type != TTM_PL_SYSTEM) { + ttm_tt_unbind(ttm); ttm_bo_free_old_node(bo); ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM, TTM_PL_MASK_MEM); diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index d28d4333dcce..bc5aa573f466 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -166,16 +166,10 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching); void ttm_tt_destroy(struct ttm_tt *ttm) { - int ret; - if (ttm == NULL) return; - if (ttm->state == tt_bound) { - ret = ttm->func->unbind(ttm); - BUG_ON(ret); - ttm->state = tt_unbound; - } + ttm_tt_unbind(ttm); if (ttm->state == tt_unbound) ttm_tt_unpopulate(ttm); @@ -255,6 +249,17 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma) } EXPORT_SYMBOL(ttm_dma_tt_fini); +void ttm_tt_unbind(struct ttm_tt *ttm) +{ + int ret; + + if (ttm->state == tt_bound) { + ret = ttm->func->unbind(ttm); + BUG_ON(ret); + ttm->state = tt_unbound; + } +} + int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { int ret = 0; @@ -291,7 +296,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm) swap_storage = ttm->swap_storage; BUG_ON(swap_storage == NULL); - swap_space = file_inode(swap_storage)->i_mapping; + swap_space = swap_storage->f_mapping; for (i = 0; i < ttm->num_pages; ++i) { from_page = shmem_read_mapping_page(swap_space, i); @@ -340,7 +345,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) } else swap_storage = persistent_swap_storage; - swap_space = file_inode(swap_storage)->i_mapping; + swap_space = swap_storage->f_mapping; for (i = 0; i < ttm->num_pages; ++i) { from_page = ttm->pages[i]; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 1a1a87cbf109..dc5beff2b4aa 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3625,9 +3625,7 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, (sw_context->cmd_bounce_size >> 1)); } - if (sw_context->cmd_bounce != NULL) - vfree(sw_context->cmd_bounce); - + vfree(sw_context->cmd_bounce); sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size); if (sw_context->cmd_bounce == NULL) { |
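/*
 * Illustrative sketch, not part of the patch: vfree() is documented as a
 * no-op when passed NULL, which is what lets the vmwgfx hunk above drop the
 * explicit check before reallocating its command bounce buffer.  A
 * hypothetical grow-only reallocation using the same pattern (assumes
 * <linux/vmalloc.h>):
 */
static int grow_bounce_buffer(void **buf, size_t *cur_size, size_t needed)
{
	if (needed <= *cur_size)
		return 0;

	vfree(*buf);		/* safe even while *buf is still NULL */
	*buf = vmalloc(needed);
	if (!*buf) {
		*cur_size = 0;
		return -ENOMEM;
	}

	*cur_size = needed;
	return 0;
}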